diff --git a/go.mod b/go.mod index 891e185b8b2..e55642bedda 100644 --- a/go.mod +++ b/go.mod @@ -40,6 +40,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/securecookie v1.1.1 github.com/gorilla/sessions v1.2.1 + github.com/hashicorp/go-multierror v1.1.1 github.com/itchyny/gojq v0.12.13 github.com/jewzaam/go-cosmosdb v0.0.0-20230924011506-8f8942a01991 github.com/jongio/azidext/go/azidext v0.5.0 @@ -56,6 +57,7 @@ require ( github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a + github.com/openshift/cloud-credential-operator v0.0.0-00010101000000-000000000000 github.com/openshift/console-operator v0.0.0-20220407014945-45d37e70e0c2 github.com/openshift/hive/apis v0.0.0 github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc @@ -171,7 +173,6 @@ require ( github.com/gorilla/schema v1.2.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.5 // indirect diff --git a/go.sum b/go.sum index cebc382136c..a6bcac8c47e 100644 --- a/go.sum +++ b/go.sum @@ -675,6 +675,8 @@ github.com/openshift/build-machinery-go v0.0.0-20210115170933-e575b44a7a94/go.mo github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20220603133046-984ee5ebedcf h1:gAYYPWVduONFJ6yuczLleApk0nEH3W0GgxDX2+O+B9E= github.com/openshift/client-go v0.0.0-20220603133046-984ee5ebedcf/go.mod h1:eDO5QeVi2IiXmDwB0e2z1DpAznWroZKe978pzZwFBzg= +github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e 
h1:2gyl9UVyjHSWzdS56KUXxQwIhENbq2x2olqoMQSA/C8= +github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e/go.mod h1:iPn+uhIe7nkP5BMHe2QnbLtg5m/AIQ1xvz9s3cig5ss= github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210626224711-5d94c794092f h1:rQwvVLPZfM5o0USkVY6mrAyJwzMUkhjn9Wz2D5vX81k= github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210626224711-5d94c794092f/go.mod h1:GR+ocB8I+Z7JTSBdO+DMu/diBfH66lRlRpnc1KWysUM= github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315 h1:zmwv8TgbOgZ5QoaPhLdOivqg706Z+VyuPs703jNMdrE= diff --git a/pkg/cluster/arooperator.go b/pkg/cluster/arooperator.go index 3b85ee7c6cc..f8b3ef7a613 100644 --- a/pkg/cluster/arooperator.go +++ b/pkg/cluster/arooperator.go @@ -5,6 +5,17 @@ package cluster import ( "context" + + cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + CredentialsRequestGroupVersionResource = schema.GroupVersionResource{ + Group: cloudcredentialv1.SchemeGroupVersion.Group, + Version: cloudcredentialv1.SchemeGroupVersion.Version, + Resource: "credentialsrequests", + } ) func (m *manager) isIngressProfileAvailable() bool { @@ -53,3 +64,7 @@ func (m *manager) ensureAROOperatorRunningDesiredVersion(ctx context.Context) (b func (m *manager) renewMDSDCertificate(ctx context.Context) error { return m.aroOperatorDeployer.RenewMDSDCertificate(ctx) } + +func (m *manager) restartAROOperatorMaster(ctx context.Context) error { + return m.aroOperatorDeployer.Restart(ctx, []string{"aro-operator-master"}) +} diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 066b198224d..606aca59bf1 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -18,6 +18,7 @@ import ( mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" "github.com/sirupsen/logrus" extensionsclient 
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "github.com/Azure/ARO-RP/pkg/api" @@ -89,6 +90,7 @@ type manager struct { graph graph.Manager kubernetescli kubernetes.Interface + dynamiccli dynamic.Interface extensionscli extensionsclient.Interface maocli machineclient.Interface mcocli mcoclient.Interface diff --git a/pkg/cluster/clusterserviceprincipal.go b/pkg/cluster/clusterserviceprincipal.go index bdcac38065a..bd26991567b 100644 --- a/pkg/cluster/clusterserviceprincipal.go +++ b/pkg/cluster/clusterserviceprincipal.go @@ -4,12 +4,14 @@ package cluster // Licensed under the Apache License 2.0. import ( + "bytes" "context" "strings" "time" mgmtauthorization "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-09-01-preview/authorization" "github.com/ghodss/yaml" + corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" applyv1 "k8s.io/client-go/applyconfigurations/core/v1" @@ -77,47 +79,70 @@ func (m *manager) createOrUpdateClusterServicePrincipalRBAC(ctx context.Context) return nil } -func (m *manager) updateAROSecret(ctx context.Context) error { - var changed bool +// cloudConfigSecretFromChanges takes in the kube-system/azure-cloud-provider Secret and a map +// containing cloud-config data. If the cloud-config data in cf is different from what's currently +// in the Secret, cloudConfigSecretFromChanges updates and returns the Secret. Otherwise, it returns nil. 
+func cloudConfigSecretFromChanges(secret *corev1.Secret, cf map[string]interface{}) (*corev1.Secret, error) { + data, err := yaml.Marshal(cf) + if err != nil { + return nil, err + } + + if !bytes.Equal(secret.Data["cloud-config"], data) { + secret.Data["cloud-config"] = data + return secret, nil + } + + return nil, nil +} + +// servicePrincipalUpdated checks whether the CSP has been updated by comparing the cluster doc's +// ServicePrincipalProfile to the contents of the kube-system/azure-cloud-provider Secret. If the CSP +// has changed, it returns a new corev1.Secret to use to update the Secret to match +// what's in the cluster doc. +func (m *manager) servicePrincipalUpdated(ctx context.Context) (*corev1.Secret, error) { spp := m.doc.OpenShiftCluster.Properties.ServicePrincipalProfile - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - //data: - // cloud-config: - secret, err := m.kubernetescli.CoreV1().Secrets("kube-system").Get(ctx, "azure-cloud-provider", metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { // we are not in control if secret is not present - return nil - } - return err + //data: + // cloud-config: + secret, err := m.kubernetescli.CoreV1().Secrets("kube-system").Get(ctx, "azure-cloud-provider", metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { // we are not in control if secret is not present + return nil, nil } + return nil, err + } - var cf map[string]interface{} - if secret != nil && secret.Data != nil { - err = yaml.Unmarshal(secret.Data["cloud-config"], &cf) - if err != nil { - return err - } - if val, ok := cf["aadClientId"].(string); ok { - if val != spp.ClientID { - cf["aadClientId"] = spp.ClientID - changed = true - } + var cf map[string]interface{} + if secret != nil && secret.Data != nil { + err = yaml.Unmarshal(secret.Data["cloud-config"], &cf) + if err != nil { + return nil, err + } + if val, ok := cf["aadClientId"].(string); ok { + if val != spp.ClientID { + cf["aadClientId"] 
= spp.ClientID } - if val, ok := cf["aadClientSecret"].(string); ok { - if val != string(spp.ClientSecret) { - cf["aadClientSecret"] = spp.ClientSecret - changed = true - } + } + if val, ok := cf["aadClientSecret"].(string); ok { + if val != string(spp.ClientSecret) { + cf["aadClientSecret"] = spp.ClientSecret } } + } - if changed { - data, err := yaml.Marshal(cf) - if err != nil { - return err - } - secret.Data["cloud-config"] = data + return cloudConfigSecretFromChanges(secret, cf) +} +func (m *manager) updateAROSecret(ctx context.Context) error { + var changed bool + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + secret, err := m.servicePrincipalUpdated(ctx) + changed = secret != nil + if err != nil { + return err + } + + if changed { _, err = m.kubernetescli.CoreV1().Secrets("kube-system").Update(ctx, secret, metav1.UpdateOptions{}) if err != nil { return err diff --git a/pkg/cluster/clusterserviceprincipal_test.go b/pkg/cluster/clusterserviceprincipal_test.go index dfaab85bee1..f160ab67d30 100644 --- a/pkg/cluster/clusterserviceprincipal_test.go +++ b/pkg/cluster/clusterserviceprincipal_test.go @@ -5,6 +5,7 @@ package cluster import ( "context" + "errors" "fmt" "reflect" "testing" @@ -18,11 +19,10 @@ import ( operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" + fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" ktesting "k8s.io/client-go/testing" "github.com/Azure/ARO-RP/pkg/api" @@ -31,6 +31,7 @@ import ( mock_features "github.com/Azure/ARO-RP/pkg/util/mocks/azureclient/mgmt/features" "github.com/Azure/ARO-RP/pkg/util/rbac" utilerror "github.com/Azure/ARO-RP/test/util/error" + "github.com/Azure/ARO-RP/test/util/serversideapply" ) const 
fakeClusterSPObjectId = "00000000-0000-0000-0000-000000000000" @@ -188,6 +189,194 @@ func getFakeAROSecret(clientID, secret string) corev1.Secret { } } +func TestCloudConfigSecretFromChanges(t *testing.T) { + for _, tt := range []struct { + name string + secretIn func() *corev1.Secret + cf map[string]interface{} + wantSecret func() *corev1.Secret + wantErrMsg string + }{ + { + name: "New CSP (client ID and client secret both changed)", + secretIn: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return &secret + }, + cf: map[string]interface{}{ + "aadClientId": "aadClientIdNew", + "aadClientSecret": "aadClientSecretNew", + }, + wantSecret: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientIdNew", "aadClientSecretNew") + return &secret + }, + }, + { + name: "Updated secret (client ID stayed the same, client secret changed)", + secretIn: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return &secret + }, + cf: map[string]interface{}{ + "aadClientId": "aadClientId", + "aadClientSecret": "aadClientSecretNew", + }, + wantSecret: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientId", "aadClientSecretNew") + return &secret + }, + }, + { + name: "No errors, nothing changed", + secretIn: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return &secret + }, + cf: map[string]interface{}{ + "aadClientId": "aadClientId", + "aadClientSecret": "aadClientSecret", + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + secret, err := cloudConfigSecretFromChanges(tt.secretIn(), tt.cf) + if tt.wantSecret != nil { + wantSecret := tt.wantSecret() + if secret == nil { + t.Errorf("Did not return a Secret, but expected the following Secret data: %v", string(wantSecret.Data["cloud-config"])) + } + if !reflect.DeepEqual(secret, wantSecret) { + t.Errorf("\n%+v \n!= \n%+v", string(secret.Data["cloud-config"]), 
string(wantSecret.Data["cloud-config"])) + } + } else if tt.wantSecret == nil && secret != nil { + t.Errorf("Should not have returned a Secret") + } + utilerror.AssertErrorMessage(t, err, tt.wantErrMsg) + }) + } +} + +func TestServicePrincipalUpdated(t *testing.T) { + ctx := context.Background() + + for _, tt := range []struct { + name string + kubernetescli func() *fake.Clientset + spp api.ServicePrincipalProfile + wantSecret func() *corev1.Secret + wantErrMsg string + }{ + { + name: "Secret not found", + kubernetescli: func() *fake.Clientset { + return fake.NewSimpleClientset() + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + wantErrMsg: "", + }, + { + name: "Encounter other error getting Secret", + kubernetescli: func() *fake.Clientset { + cli := fake.NewSimpleClientset() + cli.CoreV1().(*fakecorev1.FakeCoreV1).PrependReactor("get", "secrets", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &corev1.Secret{}, errors.New("Error getting Secret") + }) + return cli + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + wantErrMsg: "Error getting Secret", + }, + { + name: "Unable to unmarshal cloud-config data", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + secret.Data["cloud-config"] = []byte("This is some random data that is not going to unmarshal properly!") + return fake.NewSimpleClientset(&secret) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + wantErrMsg: "error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}", + }, + { + name: "New CSP (client ID and client secret both changed)", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return 
fake.NewSimpleClientset(&secret) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientIdNew", + ClientSecret: "aadClientSecretNew", + }, + wantSecret: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientIdNew", "aadClientSecretNew") + return &secret + }, + }, + { + name: "Updated secret (client ID stayed the same, client secret changed)", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + wantSecret: func() *corev1.Secret { + secret := getFakeAROSecret("aadClientId", "aadClientSecretNew") + return &secret + }, + }, + { + name: "No errors, nothing changed", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecret", + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + m := &manager{ + kubernetescli: tt.kubernetescli(), + doc: &api.OpenShiftClusterDocument{ + OpenShiftCluster: &api.OpenShiftCluster{ + Properties: api.OpenShiftClusterProperties{ + ServicePrincipalProfile: tt.spp, + }, + }, + }, + } + + secret, err := m.servicePrincipalUpdated(ctx) + if tt.wantSecret != nil { + wantSecret := tt.wantSecret() + if secret == nil { + t.Errorf("Did not return a Secret, but expected the following Secret data: %v", string(wantSecret.Data["cloud-config"])) + } + if !reflect.DeepEqual(secret, wantSecret) { + t.Errorf("\n%+v \n!= \n%+v", string(secret.Data["cloud-config"]), string(wantSecret.Data["cloud-config"])) + } + } else if tt.wantSecret == nil && secret != nil { + t.Errorf("Should not have returned a Secret") + } + utilerror.AssertErrorMessage(t, err, tt.wantErrMsg) + }) + } +} + func TestUpdateAROSecret(t *testing.T) { ctx := context.Background() @@ -335,7 
+524,7 @@ func TestUpdateOpenShiftSecret(t *testing.T) { name: "noop", kubernetescli: func() *fake.Clientset { secret := getFakeOpenShiftSecret() - return cliWithApply(&secret) + return serversideapply.CliWithApply([]string{"secrets"}, &secret) }, doc: api.OpenShiftCluster{ Properties: api.OpenShiftClusterProperties{ @@ -360,7 +549,7 @@ func TestUpdateOpenShiftSecret(t *testing.T) { name: "update secret", kubernetescli: func() *fake.Clientset { secret := getFakeOpenShiftSecret() - return cliWithApply(&secret) + return serversideapply.CliWithApply([]string{"secrets"}, &secret) }, doc: api.OpenShiftCluster{ Properties: api.OpenShiftClusterProperties{ @@ -387,7 +576,7 @@ func TestUpdateOpenShiftSecret(t *testing.T) { name: "update tenant", kubernetescli: func() *fake.Clientset { secret := getFakeOpenShiftSecret() - return cliWithApply(&secret) + return serversideapply.CliWithApply([]string{"secrets"}, &secret) }, doc: api.OpenShiftCluster{ Properties: api.OpenShiftClusterProperties{ @@ -413,7 +602,7 @@ func TestUpdateOpenShiftSecret(t *testing.T) { { name: "recreate secret when not found", kubernetescli: func() *fake.Clientset { - return cliWithApply() + return serversideapply.CliWithApply([]string{"secrets"}) }, doc: api.OpenShiftCluster{ Properties: api.OpenShiftClusterProperties{ @@ -458,44 +647,3 @@ func TestUpdateOpenShiftSecret(t *testing.T) { }) } } - -// The current kubernetes testing client does not propery handle Apply actions so we reimplement it here. -// See https://github.com/kubernetes/client-go/issues/1184 for more details. -func cliWithApply(object ...runtime.Object) *fake.Clientset { - fc := fake.NewSimpleClientset(object...) 
- fc.PrependReactor("patch", "secrets", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - pa := action.(ktesting.PatchAction) - if pa.GetPatchType() == types.ApplyPatchType { - // Apply patches are supposed to upsert, but fake client fails if the object doesn't exist, - // if an apply patch occurs for a secret that doesn't yet exist, create it. - // However, we already hold the fakeclient lock, so we can't use the front door. - rfunc := ktesting.ObjectReaction(fc.Tracker()) - _, obj, err := rfunc( - ktesting.NewGetAction(pa.GetResource(), pa.GetNamespace(), pa.GetName()), - ) - if kerrors.IsNotFound(err) || obj == nil { - _, _, _ = rfunc( - ktesting.NewCreateAction( - pa.GetResource(), - pa.GetNamespace(), - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: pa.GetName(), - Namespace: pa.GetNamespace(), - }, - }, - ), - ) - } - return rfunc(ktesting.NewPatchAction( - pa.GetResource(), - pa.GetNamespace(), - pa.GetName(), - types.StrategicMergePatchType, - pa.GetPatch())) - } - return false, nil, nil - }, - ) - return fc -} diff --git a/pkg/cluster/condition.go b/pkg/cluster/condition.go index f1bcc17d1bd..a617403a283 100644 --- a/pkg/cluster/condition.go +++ b/pkg/cluster/condition.go @@ -5,6 +5,8 @@ package cluster import ( "context" + "errors" + "time" configv1 "github.com/openshift/api/config/v1" consoleapi "github.com/openshift/console-operator/pkg/api" @@ -93,3 +95,47 @@ func isOperatorAvailable(operator *configv1.ClusterOperator) bool { } return m[configv1.OperatorAvailable] == configv1.ConditionTrue && m[configv1.OperatorProgressing] == configv1.ConditionFalse } + +// aroCredentialsRequestReconciled evaluates whether the openshift-azure-operator CredentialsRequest has recently been reconciled and returns true +// if it has been (or does not need to be under the circumstances) and false otherwise or if an error occurs, where "has recently been reconciled" +// is true if the CredentialsRequest has been reconciled within the 
past 5 minutes. +// Checking for a change to the lastSyncCloudCredsSecretResourceVersion attribute of the CredentialsRequest's status would be a neater way of checking +// whether it was reconciled, but we would have to save the value prior to updating the kube-system/azure-credentials Secret so that we'd have +// an old value to compare to. +func (m *manager) aroCredentialsRequestReconciled(ctx context.Context) (bool, error) { + // If the CSP hasn't been updated, the CredentialsRequest does not need to be reconciled. + secret, err := m.servicePrincipalUpdated(ctx) + if err != nil { + return false, err + } else if secret == nil { + return true, nil + } + + u, err := m.dynamiccli.Resource(CredentialsRequestGroupVersionResource).Namespace("openshift-cloud-credential-operator").Get(ctx, "openshift-azure-operator", metav1.GetOptions{}) + if err != nil { + return false, err + } + + cr := u.UnstructuredContent() + var status map[string]interface{} + if s, ok := cr["status"]; ok { + status = s.(map[string]interface{}) + } else { + return false, errors.New("unable to access status of openshift-azure-operator CredentialsRequest") + } + + var lastSyncTimestamp string + if lst, ok := status["lastSyncTimestamp"]; ok { + lastSyncTimestamp = lst.(string) + } else { + return false, errors.New("unable to access status.lastSyncTimestamp of openshift-azure-operator CredentialsRequest") + } + + timestamp, err := time.Parse(time.RFC3339, lastSyncTimestamp) + if err != nil { + return false, err + } + + timeSinceLastSync := time.Since(timestamp) + return timeSinceLastSync.Minutes() < 5, nil +} diff --git a/pkg/cluster/condition_test.go b/pkg/cluster/condition_test.go index 74b9c30c8e4..412f9a9b9ba 100644 --- a/pkg/cluster/condition_test.go +++ b/pkg/cluster/condition_test.go @@ -6,20 +6,26 @@ package cluster import ( "context" "testing" + "time" "github.com/golang/mock/gomock" configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" 
configfake "github.com/openshift/client-go/config/clientset/versioned/fake" operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake" + cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1" consoleapi "github.com/openshift/console-operator/pkg/api" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" + "github.com/Azure/ARO-RP/pkg/api" "github.com/Azure/ARO-RP/pkg/env" mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env" + utilerror "github.com/Azure/ARO-RP/test/util/error" ) const errMustBeNilMsg = "err must be nil; condition is retried until timeout" @@ -307,3 +313,142 @@ func TestClusterVersionReady(t *testing.T) { } } } + +func TestAroCredentialsRequestReconciled(t *testing.T) { + ctx := context.Background() + + for _, tt := range []struct { + name string + kubernetescli func() *fake.Clientset + dynamiccli func() *dynamicfake.FakeDynamicClient + spp api.ServicePrincipalProfile + want bool + wantErrMsg string + }{ + { + name: "Cluster service principal has not changed", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + dynamiccli: func() *dynamicfake.FakeDynamicClient { + return dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecret", + }, + want: true, + }, + { + name: "Encounter error getting CredentialsRequest", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + dynamiccli: func() *dynamicfake.FakeDynamicClient { + return dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + }, + spp: api.ServicePrincipalProfile{ + ClientID: 
"aadClientId", + ClientSecret: "aadClientSecretNew", + }, + want: false, + wantErrMsg: `credentialsrequests.cloudcredential.openshift.io "openshift-azure-operator" not found`, + }, + { + name: "CredentialsRequest is missing status.lastSyncTimestamp", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + dynamiccli: func() *dynamicfake.FakeDynamicClient { + cr := cloudcredentialv1.CredentialsRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "openshift-azure-operator", + Namespace: "openshift-cloud-credential-operator", + }, + } + return dynamicfake.NewSimpleDynamicClient(scheme.Scheme, &cr) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + want: false, + wantErrMsg: "unable to access status.lastSyncTimestamp of openshift-azure-operator CredentialsRequest", + }, + { + name: "CredentialsRequest was last synced 10 minutes ago (too long)", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + dynamiccli: func() *dynamicfake.FakeDynamicClient { + timestamp := metav1.NewTime(time.Now().Add(-10 * time.Minute)) + cr := cloudcredentialv1.CredentialsRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "openshift-azure-operator", + Namespace: "openshift-cloud-credential-operator", + }, + Status: cloudcredentialv1.CredentialsRequestStatus{ + LastSyncTimestamp: ×tamp, + }, + } + return dynamicfake.NewSimpleDynamicClient(scheme.Scheme, &cr) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + want: false, + }, + { + name: "CredentialsRequest was last synced 10 seconds ago", + kubernetescli: func() *fake.Clientset { + secret := getFakeAROSecret("aadClientId", "aadClientSecret") + return fake.NewSimpleClientset(&secret) + }, + dynamiccli: func() 
*dynamicfake.FakeDynamicClient { + timestamp := metav1.NewTime(time.Now().Add(-10 * time.Second)) + cr := cloudcredentialv1.CredentialsRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "openshift-azure-operator", + Namespace: "openshift-cloud-credential-operator", + }, + Status: cloudcredentialv1.CredentialsRequestStatus{ + LastSyncTimestamp: ×tamp, + }, + } + return dynamicfake.NewSimpleDynamicClient(scheme.Scheme, &cr) + }, + spp: api.ServicePrincipalProfile{ + ClientID: "aadClientId", + ClientSecret: "aadClientSecretNew", + }, + want: true, + }, + } { + t.Run(tt.name, func(t *testing.T) { + m := &manager{ + log: logrus.NewEntry(logrus.StandardLogger()), + kubernetescli: tt.kubernetescli(), + dynamiccli: tt.dynamiccli(), + doc: &api.OpenShiftClusterDocument{ + OpenShiftCluster: &api.OpenShiftCluster{ + Properties: api.OpenShiftClusterProperties{ + ServicePrincipalProfile: tt.spp, + }, + }, + }, + } + + result, err := m.aroCredentialsRequestReconciled(ctx) + if result != tt.want { + t.Errorf("Result was %v, wanted %v", result, tt.want) + } + + utilerror.AssertErrorMessage(t, err, tt.wantErrMsg) + }) + } +} diff --git a/pkg/cluster/install.go b/pkg/cluster/install.go index a20ba9a5593..c08a823601a 100644 --- a/pkg/cluster/install.go +++ b/pkg/cluster/install.go @@ -17,6 +17,7 @@ import ( securityclient "github.com/openshift/client-go/security/clientset/versioned" mcoclient "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" extensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "github.com/Azure/ARO-RP/pkg/api" @@ -200,7 +201,10 @@ func (m *manager) Update(ctx context.Context) error { steps.Action(m.configureIngressCertificate), steps.Action(m.renewMDSDCertificate), steps.Action(m.updateOpenShiftSecret), + steps.Condition(m.aroCredentialsRequestReconciled, 3*time.Minute, true), steps.Action(m.updateAROSecret), + steps.Action(m.restartAROOperatorMaster), 
// depends on m.updateOpenShiftSecret; the point of restarting is to pick up any changes made to the secret + steps.Condition(m.aroDeploymentReady, 5*time.Minute, true), steps.Action(m.reconcileLoadBalancerProfile), } @@ -448,6 +452,11 @@ func (m *manager) initializeKubernetesClients(ctx context.Context) error { return err } + m.dynamiccli, err = dynamic.NewForConfig(restConfig) + if err != nil { + return err + } + m.extensionscli, err = extensionsclient.NewForConfig(restConfig) if err != nil { return err diff --git a/pkg/operator/controllers/checkers/serviceprincipalchecker/checker.go b/pkg/operator/controllers/checkers/serviceprincipalchecker/checker.go index 7705372b13d..c007259c17d 100644 --- a/pkg/operator/controllers/checkers/serviceprincipalchecker/checker.go +++ b/pkg/operator/controllers/checkers/serviceprincipalchecker/checker.go @@ -22,8 +22,7 @@ type servicePrincipalChecker interface { type checker struct { log *logrus.Entry - credentials func(ctx context.Context) (*clusterauthorizer.Credentials, error) - getTokenCredential func(azEnv *azureclient.AROEnvironment, credentials *clusterauthorizer.Credentials) (azcore.TokenCredential, error) + getTokenCredential func(azEnv *azureclient.AROEnvironment) (azcore.TokenCredential, error) newSPValidator func(azEnv *azureclient.AROEnvironment) dynamic.ServicePrincipalValidator } @@ -31,9 +30,6 @@ func newServicePrincipalChecker(log *logrus.Entry, client client.Client) *checke return &checker{ log: log, - credentials: func(ctx context.Context) (*clusterauthorizer.Credentials, error) { - return clusterauthorizer.AzCredentials(ctx, client) - }, getTokenCredential: clusterauthorizer.GetTokenCredential, newSPValidator: func(azEnv *azureclient.AROEnvironment) dynamic.ServicePrincipalValidator { return dynamic.NewServicePrincipalValidator(log, azEnv, dynamic.AuthorizerClusterServicePrincipal) @@ -47,14 +43,9 @@ func (r *checker) Check(ctx context.Context, AZEnvironment string) error { return err } - azCred, err := 
r.credentials(ctx) - if err != nil { - return err - } - spDynamic := r.newSPValidator(&azEnv) - spTokenCredential, err := r.getTokenCredential(&azEnv, azCred) + spTokenCredential, err := r.getTokenCredential(&azEnv) if err != nil { return err } diff --git a/pkg/operator/controllers/checkers/serviceprincipalchecker/checker_test.go b/pkg/operator/controllers/checkers/serviceprincipalchecker/checker_test.go index ebf15f5f6a1..9505151d940 100644 --- a/pkg/operator/controllers/checkers/serviceprincipalchecker/checker_test.go +++ b/pkg/operator/controllers/checkers/serviceprincipalchecker/checker_test.go @@ -16,7 +16,6 @@ import ( "github.com/sirupsen/logrus" "github.com/Azure/ARO-RP/pkg/util/azureclient" - "github.com/Azure/ARO-RP/pkg/util/clusterauthorizer" mock_dynamic "github.com/Azure/ARO-RP/pkg/util/mocks/dynamic" "github.com/Azure/ARO-RP/pkg/validate/dynamic" utilerror "github.com/Azure/ARO-RP/test/util/error" @@ -36,11 +35,6 @@ func (c fakeTokenCredential) GetToken(ctx context.Context, options policy.TokenR func TestCheck(t *testing.T) { ctx := context.Background() log := logrus.NewEntry(logrus.StandardLogger()) - mockCredentials := &clusterauthorizer.Credentials{ - ClientID: []byte("fake-client-id"), - ClientSecret: []byte("fake-client-secret"), - TenantID: []byte("fake-tenant-id"), - } for _, tt := range []struct { name string @@ -68,10 +62,6 @@ func TestCheck(t *testing.T) { }, wantErr: "fake validation error", }, - { - name: "could not get service principal credentials", - wantErr: "fake credentials get error", - }, } { t.Run(tt.name, func(t *testing.T) { controller := gomock.NewController(t) @@ -84,13 +74,7 @@ func TestCheck(t *testing.T) { sp := &checker{ log: log, - credentials: func(ctx context.Context) (*clusterauthorizer.Credentials, error) { - if tt.credentialsExist { - return mockCredentials, nil - } - return nil, errors.New("fake credentials get error") - }, - getTokenCredential: func(*azureclient.AROEnvironment, *clusterauthorizer.Credentials) 
(azcore.TokenCredential, error) { + getTokenCredential: func(*azureclient.AROEnvironment) (azcore.TokenCredential, error) { return &fakeTokenCredential{}, nil }, newSPValidator: func(azEnv *azureclient.AROEnvironment) dynamic.ServicePrincipalValidator { diff --git a/pkg/operator/deploy/deploy.go b/pkg/operator/deploy/deploy.go index 082327e0fbe..59ebe8dd424 100644 --- a/pkg/operator/deploy/deploy.go +++ b/pkg/operator/deploy/deploy.go @@ -13,6 +13,7 @@ import ( "text/template" "time" + "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -37,6 +38,7 @@ import ( aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned" "github.com/Azure/ARO-RP/pkg/operator/controllers/genevalogging" "github.com/Azure/ARO-RP/pkg/util/dynamichelper" + utilkubernetes "github.com/Azure/ARO-RP/pkg/util/kubernetes" utilpem "github.com/Azure/ARO-RP/pkg/util/pem" "github.com/Azure/ARO-RP/pkg/util/pullsecret" "github.com/Azure/ARO-RP/pkg/util/ready" @@ -49,6 +51,7 @@ var embeddedFiles embed.FS type Operator interface { CreateOrUpdate(context.Context) error IsReady(context.Context) (bool, error) + Restart(context.Context, []string) error IsRunningDesiredVersion(context.Context) (bool, error) RenewMDSDCertificate(context.Context) error } @@ -405,6 +408,18 @@ func (o *operator) IsReady(ctx context.Context) (bool, error) { return true, nil } +func (o *operator) Restart(ctx context.Context, deploymentNames []string) error { + var result error + for _, dn := range deploymentNames { + err := utilkubernetes.Restart(ctx, o.kubernetescli.AppsV1().Deployments(pkgoperator.Namespace), pkgoperator.Namespace, dn) + if err != nil { + result = multierror.Append(result, err) + } + } + + return result +} + func checkOperatorDeploymentVersion(ctx context.Context, cli appsv1client.DeploymentInterface, name string, desiredVersion string) (bool, error) { d, err := cli.Get(ctx, name, 
metav1.GetOptions{}) switch { diff --git a/pkg/operator/deploy/staticresources/credentialsrequest.yaml b/pkg/operator/deploy/staticresources/credentialsrequest.yaml new file mode 100644 index 00000000000..d830ec74555 --- /dev/null +++ b/pkg/operator/deploy/staticresources/credentialsrequest.yaml @@ -0,0 +1,24 @@ +apiVersion: cloudcredential.openshift.io/v1 +kind: CredentialsRequest +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + finalizers: + - cloudcredential.openshift.io/deprovision + generation: 1 + labels: + controller-tools.k8s.io: "1.0" + name: openshift-azure-operator + namespace: openshift-cloud-credential-operator +spec: + providerSpec: + apiVersion: cloudcredential.openshift.io/v1 + kind: AzureProviderSpec + roleBindings: + - role: Contributor + secretRef: + name: azure-cloud-credentials + namespace: openshift-azure-operator + serviceAccountNames: + - aro-operator-master diff --git a/pkg/operator/deploy/staticresources/master/deployment.yaml.tmpl b/pkg/operator/deploy/staticresources/master/deployment.yaml.tmpl index 455e8210758..6746ab94889 100644 --- a/pkg/operator/deploy/staticresources/master/deployment.yaml.tmpl +++ b/pkg/operator/deploy/staticresources/master/deployment.yaml.tmpl @@ -28,8 +28,23 @@ spec: - master image: "{{ .Image }}" name: aro-operator - {{ if .IsLocalDevelopment}} env: + - name: AZURE_CLIENT_ID + valueFrom: + secretKeyRef: + name: azure-cloud-credentials + key: azure_client_id + - name: AZURE_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: azure-cloud-credentials + key: azure_client_secret + - name: AZURE_TENANT_ID + valueFrom: + secretKeyRef: + name: azure-cloud-credentials + key: azure_tenant_id + {{ if .IsLocalDevelopment}} - name: "RP_MODE" value: "development" {{ end }} diff --git a/pkg/util/azureclient/environments.go b/pkg/util/azureclient/environments.go index acae0668a6b..fe29ed97add 100644 --- 
a/pkg/util/azureclient/environments.go +++ b/pkg/util/azureclient/environments.go @@ -116,6 +116,14 @@ func (e *AROEnvironment) ClientSecretCredentialOptions() *azidentity.ClientSecre } } +func (e *AROEnvironment) DefaultAzureCredentialOptions() *azidentity.DefaultAzureCredentialOptions { + return &azidentity.DefaultAzureCredentialOptions{ + ClientOptions: azcore.ClientOptions{ + Cloud: e.Cloud, + }, + } +} + func (e *AROEnvironment) EnvironmentCredentialOptions() *azidentity.EnvironmentCredentialOptions { return &azidentity.EnvironmentCredentialOptions{ ClientOptions: azcore.ClientOptions{ diff --git a/pkg/util/azureclient/mgmt/redhatopenshift/2022-09-04/redhatopenshift/openshiftclusters_addons.go b/pkg/util/azureclient/mgmt/redhatopenshift/2022-09-04/redhatopenshift/openshiftclusters_addons.go index 112249de3bc..222d5844af5 100644 --- a/pkg/util/azureclient/mgmt/redhatopenshift/2022-09-04/redhatopenshift/openshiftclusters_addons.go +++ b/pkg/util/azureclient/mgmt/redhatopenshift/2022-09-04/redhatopenshift/openshiftclusters_addons.go @@ -12,6 +12,7 @@ import ( // OpenShiftClustersClientAddons contains addons for OpenShiftClustersClient type OpenShiftClustersClientAddons interface { CreateOrUpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20220904.OpenShiftCluster) error + UpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20220904.OpenShiftClusterUpdate) error DeleteAndWait(ctx context.Context, resourceGroupName string, resourceName string) error List(ctx context.Context) (clusters []mgmtredhatopenshift20220904.OpenShiftCluster, err error) ListByResourceGroup(ctx context.Context, resourceGroupName string) (clusters []mgmtredhatopenshift20220904.OpenShiftCluster, err error) @@ -25,6 +26,14 @@ func (c *openShiftClustersClient) CreateOrUpdateAndWait(ctx context.Context, res return future.WaitForCompletionRef(ctx, c.Client) } +func (c 
*openShiftClustersClient) UpdateAndWait(ctx context.Context, resourceGroupName string, resourceName string, parameters mgmtredhatopenshift20220904.OpenShiftClusterUpdate) error { + future, err := c.Update(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + return err + } + + return future.WaitForCompletionRef(ctx, c.Client) +} func (c *openShiftClustersClient) DeleteAndWait(ctx context.Context, resourceGroupName string, resourceName string) error { future, err := c.Delete(ctx, resourceGroupName, resourceName) diff --git a/pkg/util/clusterauthorizer/authorizer.go b/pkg/util/clusterauthorizer/authorizer.go index e654ad702f1..1ee120cc1b3 100644 --- a/pkg/util/clusterauthorizer/authorizer.go +++ b/pkg/util/clusterauthorizer/authorizer.go @@ -13,8 +13,6 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/jongio/azidext/go/azidext" "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/Azure/ARO-RP/pkg/api" @@ -32,7 +30,7 @@ type azRefreshableAuthorizer struct { azureEnvironment *azureclient.AROEnvironment client client.Client - getTokenCredential func(*azureclient.AROEnvironment, *Credentials) (azcore.TokenCredential, error) + getTokenCredential func(*azureclient.AROEnvironment) (azcore.TokenCredential, error) } // NewAzRefreshableAuthorizer returns a new refreshable authorizer @@ -53,14 +51,7 @@ func NewAzRefreshableAuthorizer(log *logrus.Entry, azEnv *azureclient.AROEnviron } func (a *azRefreshableAuthorizer) NewRefreshableAuthorizerToken(ctx context.Context) (autorest.Authorizer, error) { - // Grab azure-credentials from secret - credentials, err := AzCredentials(ctx, a.client) - if err != nil { - return nil, err - } - - // Create service principal token from azure-credentials - tokenCredential, err := a.getTokenCredential(a.azureEnvironment, credentials) + tokenCredential, err := a.getTokenCredential(a.azureEnvironment) if err != nil { return 
nil, api.NewCloudError(http.StatusBadRequest, api.CloudErrorCodeInvalidServicePrincipalCredentials, "properties.servicePrincipalProfile", "the provided service principal is invalid") } @@ -70,31 +61,6 @@ func (a *azRefreshableAuthorizer) NewRefreshableAuthorizerToken(ctx context.Cont return azidext.NewTokenCredentialAdapter(tokenCredential, scopes), nil } -func GetTokenCredential(environment *azureclient.AROEnvironment, credentials *Credentials) (azcore.TokenCredential, error) { - return azidentity.NewClientSecretCredential( - string(credentials.TenantID), - string(credentials.ClientID), - string(credentials.ClientSecret), - environment.ClientSecretCredentialOptions()) -} - -// AzCredentials gets Cluster Service Principal credentials from the Kubernetes secrets -func AzCredentials(ctx context.Context, client client.Client) (*Credentials, error) { - clusterSPSecret := &corev1.Secret{} - err := client.Get(ctx, types.NamespacedName{Namespace: AzureCredentialSecretNameSpace, Name: AzureCredentialSecretName}, clusterSPSecret) - if err != nil { - return nil, err - } - - for _, key := range []string{"azure_client_id", "azure_client_secret", "azure_tenant_id"} { - if _, ok := clusterSPSecret.Data[key]; !ok { - return nil, fmt.Errorf("%s does not exist in the secret", key) - } - } - - return &Credentials{ - ClientID: clusterSPSecret.Data["azure_client_id"], - ClientSecret: clusterSPSecret.Data["azure_client_secret"], - TenantID: clusterSPSecret.Data["azure_tenant_id"], - }, nil +func GetTokenCredential(environment *azureclient.AROEnvironment) (azcore.TokenCredential, error) { + return azidentity.NewDefaultAzureCredential(environment.DefaultAzureCredentialOptions()) } diff --git a/pkg/util/clusterauthorizer/authorizer_test.go b/pkg/util/clusterauthorizer/authorizer_test.go index 1a46f0bef67..83f22e8a1d0 100644 --- a/pkg/util/clusterauthorizer/authorizer_test.go +++ b/pkg/util/clusterauthorizer/authorizer_test.go @@ -4,13 +4,8 @@ package clusterauthorizer // Licensed under 
the Apache License 2.0. import ( - "context" "testing" - "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - jwt "github.com/golang-jwt/jwt/v4" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,12 +15,6 @@ import ( utilerror "github.com/Azure/ARO-RP/test/util/error" ) -type tokenRequirements struct { - clientSecret string - claims jwt.MapClaims - signingMethod jwt.SigningMethod -} - var ( azureSecretName = "azure-credentials" nameSpace = "kube-system" @@ -67,167 +56,6 @@ func TestNewAzRefreshableAuthorizer(t *testing.T) { } } -func TestNewRefreshableAuthorizerToken(t *testing.T) { - ctx := context.Background() - - log := logrus.NewEntry(logrus.StandardLogger()) - - for _, tt := range []struct { - name string - secret *corev1.Secret - tr *tokenRequirements - wantErr string - }{ - { - name: "fail: Missing client secret", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: azureSecretName, - Namespace: nameSpace, - }, - Data: map[string][]byte{ - "azure_client_id": []byte("client-id"), - "azure_tenant_id": []byte("tenant-id.example.com"), - }, - }, - wantErr: "azure_client_secret does not exist in the secret", - }, - { - name: "pass: create new bearer authorizer token", - tr: &tokenRequirements{ - clientSecret: "my-secret", - signingMethod: jwt.SigningMethodHS256, - }, - secret: newV1CoreSecret(azureSecretName, nameSpace), - }, - } { - clientFake := ctrlfake.NewClientBuilder().WithObjects(tt.secret).Build() - - azRefreshAuthorizer, err := NewAzRefreshableAuthorizer(log, &azureclient.PublicCloud, clientFake) - if err != nil { - t.Errorf("failed to create azRefreshAuthorizer, %v", err) - } - azRefreshAuthorizer.getTokenCredential = func(*azureclient.AROEnvironment, *Credentials) (azcore.TokenCredential, error) { - return tt.tr, nil - } - - t.Run(tt.name, func(t *testing.T) { - token, err := 
azRefreshAuthorizer.NewRefreshableAuthorizerToken(ctx) - if err != nil && err.Error() != tt.wantErr || - err == nil && tt.wantErr != "" { - t.Logf("Token: %v", token) - t.Errorf("\n%v\n !=\n%v", err, tt.wantErr) - } - }) - } -} - -func TestAzCredentials(t *testing.T) { - ctx := context.Background() - - var ( - azureSecretName = "azure-credentials" - nameSpace = "kube-system" - ) - for _, tt := range []struct { - name string - secret *corev1.Secret - wantErr string - }{ - { - name: "fail: Missing clientID", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: azureSecretName, - Namespace: nameSpace, - }, - Data: map[string][]byte{ - "azure_client_secret": []byte("client-secret"), - "azure_tenant_id": []byte("tenant-id.example.com"), - }, - }, - wantErr: "azure_client_id does not exist in the secret", - }, - { - name: "fail: missing tenantID", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: azureSecretName, - Namespace: nameSpace, - }, - Data: map[string][]byte{ - "azure_client_secret": []byte("client-secret"), - "azure_client_id": []byte("client-id"), - }, - }, - wantErr: "azure_tenant_id does not exist in the secret", - }, - { - name: "fail: missing secret", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: azureSecretName, - Namespace: nameSpace, - }, - Data: map[string][]byte{ - "azure_client_id": []byte("client-id"), - "azure_tenant_id": []byte("tenant-id.example.com"), - }, - }, - wantErr: "azure_client_secret does not exist in the secret", - }, - { - name: "fail: wrong namespace", - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: azureSecretName, - Namespace: "default", - }, - Data: map[string][]byte{ - "azure_client_secret": []byte("client-secret"), - "azure_client_id": []byte("client-id"), - "azure_tenant_id": []byte("tenant-id.example.com"), - }, - }, - wantErr: "secrets \"azure-credentials\" not found", - }, - { - name: "pass: all credential properties", - secret: &corev1.Secret{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: azureSecretName, - Namespace: nameSpace, - }, - Data: map[string][]byte{ - "azure_client_secret": []byte("client-secret"), - "azure_client_id": []byte("client-id"), - "azure_tenant_id": []byte("tenant-id.example.com"), - }, - }, - }, - } { - clientFake := ctrlfake.NewClientBuilder().WithObjects(tt.secret).Build() - - t.Run(tt.name, func(t *testing.T) { - _, err := AzCredentials(ctx, clientFake) - utilerror.AssertErrorMessage(t, err, tt.wantErr) - }) - } -} - -// GetToken allows tokenRequirements to be used as an azcore.TokenCredential. -func (tr *tokenRequirements) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) { - token, err := jwt.NewWithClaims(tr.signingMethod, tr.claims).SignedString([]byte(tr.clientSecret)) - if err != nil { - return azcore.AccessToken{}, err - } - - return azcore.AccessToken{ - Token: token, - ExpiresOn: time.Now().Add(10 * time.Minute), - }, nil -} - func newV1CoreSecret(azSecretName, ns string) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/util/kubernetes/deployments.go b/pkg/util/kubernetes/deployments.go new file mode 100644 index 00000000000..5f63b71c239 --- /dev/null +++ b/pkg/util/kubernetes/deployments.go @@ -0,0 +1,29 @@ +package kubernetes + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" + appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +// Restart restarts the given Deployment by performing the equivalent of a `kubectl rollout restart deployment ` against it. 
+// Note that Restart needs the namespace of the Deployment as a separate argument even though the namespace is already "encapsulated" +// by the DeploymentInterface (and the namespace you pass should be the same one used to create the DeploymentInterface). +func Restart(ctx context.Context, cli appsv1client.DeploymentInterface, namespace string, deploymentName string) error { + dac := appsv1.Deployment(deploymentName, namespace).WithSpec( + appsv1.DeploymentSpec().WithTemplate( + corev1.PodTemplateSpec().WithAnnotations(map[string]string{ + "kubectl.kubernetes.io/restartedAt": time.Now().Format(time.RFC3339), + }), + ), + ) + _, err := cli.Apply(ctx, dac, metav1.ApplyOptions{FieldManager: "aro-rp", Force: true}) + return err +} diff --git a/pkg/util/kubernetes/deployments_test.go b/pkg/util/kubernetes/deployments_test.go new file mode 100644 index 00000000000..a5760fa02d1 --- /dev/null +++ b/pkg/util/kubernetes/deployments_test.go @@ -0,0 +1,86 @@ +package kubernetes + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. 
+ +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" + + utilerror "github.com/Azure/ARO-RP/test/util/error" + "github.com/Azure/ARO-RP/test/util/serversideapply" +) + +func TestRestart(t *testing.T) { + ctx := context.Background() + + for _, tt := range []struct { + name string + deployment *appsv1.Deployment + deploymentCli func(*fake.Clientset) appsv1client.DeploymentInterface + deploymentName string + deploymentNamespace string + wantErrMsg string + }{ + { + name: "Success", + deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "aro-operator-master", + Namespace: "openshift-azure-operator", + }, + }, + deploymentCli: func(clientset *fake.Clientset) appsv1client.DeploymentInterface { + return clientset.AppsV1().Deployments("openshift-azure-operator") + }, + deploymentName: "aro-operator-master", + deploymentNamespace: "openshift-azure-operator", + }, + { + name: "Caller passed a DeploymentInterface whose namespace doesn't match the one passed", + deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "aro-operator-master", + Namespace: "openshift-azure-operator", + }, + }, + deploymentCli: func(clientset *fake.Clientset) appsv1client.DeploymentInterface { + return clientset.AppsV1().Deployments("azure-operator") + }, + deploymentName: "aro-operator-master", + deploymentNamespace: "openshift-azure-operator", + wantErrMsg: `request namespace does not match object namespace, request: "azure-operator" object: "openshift-azure-operator"`, + }, + } { + t.Run(tt.name, func(t *testing.T) { + clientset := serversideapply.CliWithApply([]string{"deployments"}, tt.deployment) + err := Restart(ctx, tt.deploymentCli(clientset), tt.deploymentNamespace, tt.deploymentName) + utilerror.AssertErrorMessage(t, err, tt.wantErrMsg) + + d, _ := 
clientset.AppsV1().Deployments(tt.deploymentNamespace).Get(ctx, tt.deploymentName, metav1.GetOptions{}) + + // Checking for this annotation to be here is consistent with how Kubernetes really behaves; + // even after the Deployment is done with the restart, the annotation remains. + if err == nil { + foundRestartAnnotation := false + if d.Spec.Template.Annotations != nil { + for a := range d.Spec.Template.Annotations { + if a == "kubectl.kubernetes.io/restartedAt" { + foundRestartAnnotation = true + break + } + } + } + + if !foundRestartAnnotation { + t.Errorf("Expected restart annotation is missing from Deployment") + } + } + }) + } +} diff --git a/pkg/util/mocks/operator/deploy/deploy.go b/pkg/util/mocks/operator/deploy/deploy.go index 4b42b560909..df12bfbb3b8 100644 --- a/pkg/util/mocks/operator/deploy/deploy.go +++ b/pkg/util/mocks/operator/deploy/deploy.go @@ -91,3 +91,17 @@ func (mr *MockOperatorMockRecorder) RenewMDSDCertificate(arg0 interface{}) *gomo mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewMDSDCertificate", reflect.TypeOf((*MockOperator)(nil).RenewMDSDCertificate), arg0) } + +// Restart mocks base method. +func (m *MockOperator) Restart(arg0 context.Context, arg1 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Restart", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Restart indicates an expected call of Restart. 
+func (mr *MockOperatorMockRecorder) Restart(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Restart", reflect.TypeOf((*MockOperator)(nil).Restart), arg0, arg1) +} diff --git a/pkg/util/scheme/scheme.go b/pkg/util/scheme/scheme.go index a414f7162e4..d28a12b045d 100644 --- a/pkg/util/scheme/scheme.go +++ b/pkg/util/scheme/scheme.go @@ -11,6 +11,7 @@ import ( machinev1beta1 "github.com/openshift/api/machine/v1beta1" operatorv1 "github.com/openshift/api/operator/v1" securityv1 "github.com/openshift/api/security/v1" + cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -47,6 +48,7 @@ func init() { utilruntime.Must(consolev1.AddToScheme(scheme.Scheme)) utilruntime.Must(monitoringv1.AddToScheme(scheme.Scheme)) utilruntime.Must(operatorv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(cloudcredentialv1.AddToScheme(scheme.Scheme)) // AzureMachineProviderSpec is not registered by default scheme.Scheme.AddKnownTypes(machinev1beta1.GroupVersion, &machinev1beta1.AzureMachineProviderSpec{}) // AzureMachineProviderSpec type has been deleted from sigs.k8s.io/cluster-api-provider-azure. 
diff --git a/test/e2e/update.go b/test/e2e/update.go index 756935287cb..068a7f51ce7 100644 --- a/test/e2e/update.go +++ b/test/e2e/update.go @@ -15,11 +15,39 @@ import ( "github.com/Azure/go-autorest/autorest/to" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + mgmtredhatopenshift20220904 "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2022-09-04/redhatopenshift" mgmtredhatopenshift20230701preview "github.com/Azure/ARO-RP/pkg/client/services/redhatopenshift/mgmt/2023-07-01-preview/redhatopenshift" "github.com/Azure/ARO-RP/pkg/util/stringutils" ) var _ = Describe("Update clusters", func() { + It("must restart the aro-operator-master Deployment", func(ctx context.Context) { + By("saving the current revision of the aro-operator-master Deployment") + d, err := clients.Kubernetes.AppsV1().Deployments("openshift-azure-operator").Get(ctx, "aro-operator-master", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(d.ObjectMeta.Annotations).To(HaveKey("deployment.kubernetes.io/revision")) + + oldRevision, err := strconv.Atoi(d.ObjectMeta.Annotations["deployment.kubernetes.io/revision"]) + Expect(err).NotTo(HaveOccurred()) + + By("sending the PATCH request to update the cluster") + err = clients.OpenshiftClusters.UpdateAndWait(ctx, vnetResourceGroup, clusterName, mgmtredhatopenshift20220904.OpenShiftClusterUpdate{}) + Expect(err).NotTo(HaveOccurred()) + + By("checking that the aro-operator-master Deployment was restarted") + d, err = clients.Kubernetes.AppsV1().Deployments("openshift-azure-operator").Get(ctx, "aro-operator-master", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(d.Spec.Template.Annotations).To(HaveKey("kubectl.kubernetes.io/restartedAt")) + + Expect(d.ObjectMeta.Annotations).To(HaveKey("deployment.kubernetes.io/revision")) + + newRevision, err := strconv.Atoi(d.ObjectMeta.Annotations["deployment.kubernetes.io/revision"]) + Expect(err).NotTo(HaveOccurred()) + Expect(newRevision).To(Equal(oldRevision + 1)) + 
}) + + // This tests the API which is most commonly generated by + // az resource tag --tags key=value --ids /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.RedHatOpenShift/openShiftClusters/xxx It("must be possible to set tags on a cluster resource via PUT", func(ctx context.Context) { diff --git a/test/util/serversideapply/serversideapply.go b/test/util/serversideapply/serversideapply.go new file mode 100644 index 00000000000..97d395fec9c --- /dev/null +++ b/test/util/serversideapply/serversideapply.go @@ -0,0 +1,93 @@ +package serversideapply + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" +) + +// The current kubernetes testing client does not properly handle Apply actions so we reimplement it here. +// See https://github.com/kubernetes/client-go/issues/1184 for more details. +func CliWithApply(objectTypes []string, object ...runtime.Object) *fake.Clientset { + // This slice will have to be kept up-to-date with the switch statement + // further down inside the for loop... + supportedObjectTypes := []string{ + "secrets", + "deployments", + } + + fc := fake.NewSimpleClientset(object...) 
+ for _, ot := range objectTypes { + foundOt := false + for _, sot := range supportedObjectTypes { + if ot == sot { + foundOt = true + break + } + } + + if !foundOt { + panic(fmt.Sprintf("Kubernetes object type %s needs to be added to ARO-RP/test/util/serversideapply's CliWithApply (see doc comment on function for context)", ot)) + } + + fc.PrependReactor("patch", ot, func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + pa := action.(ktesting.PatchAction) + if pa.GetPatchType() == types.ApplyPatchType { + // Apply patches are supposed to upsert, but fake client fails if the object doesn't exist, + // if an apply patch occurs for an object that doesn't yet exist, create it. + // However, we already hold the fakeclient lock, so we can't use the front door. + rfunc := ktesting.ObjectReaction(fc.Tracker()) + _, obj, err := rfunc( + ktesting.NewGetAction(pa.GetResource(), pa.GetNamespace(), pa.GetName()), + ) + + var blankObject runtime.Object + switch ot { + case "secrets": + blankObject = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pa.GetName(), + Namespace: pa.GetNamespace(), + }, + } + case "deployments": + blankObject = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: pa.GetName(), + Namespace: pa.GetNamespace(), + }, + } + } + + if kerrors.IsNotFound(err) || obj == nil { + _, _, _ = rfunc( + ktesting.NewCreateAction( + pa.GetResource(), + pa.GetNamespace(), + blankObject, + ), + ) + } + return rfunc(ktesting.NewPatchAction( + pa.GetResource(), + pa.GetNamespace(), + pa.GetName(), + types.StrategicMergePatchType, + pa.GetPatch())) + } + return false, nil, nil + }, + ) + } + return fc +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/LICENSE b/vendor/github.com/openshift/cloud-credential-operator/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 
+ http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/aws_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/aws_types.go new file mode 100644 index 00000000000..fbb295b06c3 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/aws_types.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO: these types should eventually be broken out, along with the actuator, to a separate repo. + +// AWSProviderSpec contains the required information to create a user policy in AWS. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AWSProviderSpec struct { + metav1.TypeMeta `json:",inline"` + // StatementEntries contains a list of policy statements that should be associated with this credentials access key. + StatementEntries []StatementEntry `json:"statementEntries"` +} + +// StatementEntry models an AWS policy statement entry. +type StatementEntry struct { + // Effect indicates if this policy statement is to Allow or Deny. + Effect string `json:"effect"` + // Action describes the particular AWS service actions that should be allowed or denied. (i.e. ec2:StartInstances, iam:ChangePassword) + Action []string `json:"action"` + // Resource specifies the object(s) this statement should apply to. (or "*" for all) + Resource string `json:"resource"` +} + +// AWSProviderStatus contains the status of the credentials request in AWS. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AWSProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // User is the name of the User created in AWS for these credentials. + User string `json:"user"` + // Policy is the name of the policy attached to the user in AWS. 
+ Policy string `json:"policy"` +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/azure_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/azure_types.go new file mode 100644 index 00000000000..2126753e4c0 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/azure_types.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO: these types should eventually be broken out, along with the actuator, +// to a separate repo. + +// AzureProviderSpec contains the required information to create RBAC role +// bindings for Azure. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AzureProviderSpec struct { + metav1.TypeMeta `json:",inline"` + + // RoleBindings contains a list of roles that should be associated with the minted credential. + RoleBindings []RoleBinding `json:"roleBindings"` +} + +// RoleBinding models part of the Azure RBAC Role Binding +type RoleBinding struct { + // Role defines a set of permissions that should be associated with the minted credential. + Role string `json:"role"` +} + +// AzureProviderStatus contains the status of the credentials request in Azure. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AzureProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // ServicePrincipalName is the name of the service principal created in Azure for these credentials. + ServicePrincipalName string `json:"name"` + + // AppID is the application id of the service principal created in Azure for these credentials. + AppID string `json:"appID"` + + // SecretLastResourceVersion is the resource version of the secret resource + // that was last synced. Used to determine if the object has changed and + // requires a sync. + SecretLastResourceVersion string `json:"secretLastResourceVersion"` +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/codec.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/codec.go new file mode 100644 index 00000000000..4724e815370 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/codec.go @@ -0,0 +1,103 @@ +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "bytes" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// NewScheme creates a new Scheme +func NewScheme() (*runtime.Scheme, error) { + return SchemeBuilder.Build() +} + +// ProviderCodec is a runtime codec for providers. 
+// +k8s:deepcopy-gen=false +type ProviderCodec struct { + encoder runtime.Encoder + decoder runtime.Decoder +} + +// NewCodec creates a serializer/deserializer for the provider configuration +func NewCodec() (*ProviderCodec, error) { + scheme, err := NewScheme() + if err != nil { + return nil, err + } + codecFactory := serializer.NewCodecFactory(scheme) + encoder, err := newEncoder(&codecFactory) + if err != nil { + return nil, err + } + codec := ProviderCodec{ + encoder: encoder, + decoder: codecFactory.UniversalDecoder(SchemeGroupVersion), + } + return &codec, nil +} + +// EncodeProvider serializes an object to the provider spec. +func (codec *ProviderCodec) EncodeProviderSpec(in runtime.Object) (*runtime.RawExtension, error) { + var buf bytes.Buffer + if err := codec.encoder.Encode(in, &buf); err != nil { + return nil, fmt.Errorf("encoding failed: %v", err) + } + return &runtime.RawExtension{Raw: buf.Bytes()}, nil +} + +// DecodeProviderSpec deserializes an object from the provider config. +func (codec *ProviderCodec) DecodeProviderSpec(providerConfig *runtime.RawExtension, out runtime.Object) error { + _, _, err := codec.decoder.Decode(providerConfig.Raw, nil, out) + if err != nil { + return fmt.Errorf("decoding failure: %v", err) + } + return nil +} + +// EncodeProviderStatus serializes the provider status. +func (codec *ProviderCodec) EncodeProviderStatus(in runtime.Object) (*runtime.RawExtension, error) { + var buf bytes.Buffer + if err := codec.encoder.Encode(in, &buf); err != nil { + return nil, fmt.Errorf("encoding failed: %v", err) + } + return &runtime.RawExtension{Raw: buf.Bytes()}, nil +} + +// DecodeProviderStatus deserializes the provider status. 
+func (codec *ProviderCodec) DecodeProviderStatus(providerStatus *runtime.RawExtension, out runtime.Object) error { + if providerStatus != nil { + _, _, err := codec.decoder.Decode(providerStatus.Raw, nil, out) + if err != nil { + return fmt.Errorf("decoding failure: %v", err) + } + return nil + } + return nil +} + +func newEncoder(codecFactory *serializer.CodecFactory) (runtime.Encoder, error) { + serializerInfos := codecFactory.SupportedMediaTypes() + if len(serializerInfos) == 0 { + return nil, fmt.Errorf("unable to find any serlializers") + } + encoder := codecFactory.EncoderForVersion(serializerInfos[0].Serializer, SchemeGroupVersion) + return encoder, nil +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/credentialsrequest_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/credentialsrequest_types.go new file mode 100644 index 00000000000..3eeabd2f57a --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/credentialsrequest_types.go @@ -0,0 +1,153 @@ +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // FinalizerDeprovision is used on CredentialsRequests to ensure we delete the + // credentials in AWS before allowing the CredentialsRequest to be deleted in etcd. + FinalizerDeprovision string = "cloudcredential.openshift.io/deprovision" + + // AnnotationCredentialsRequest is used on Secrets created as a target of CredentialsRequests. + // The annotation value will map back to the namespace/name of the CredentialsRequest that created + // or adopted the secret. + AnnotationCredentialsRequest string = "cloudcredential.openshift.io/credentials-request" + + // AnnotationAWSPolicyLastApplied is added to target Secrets indicating the last AWS policy + // we successfully applied. It is used to compare if changes are necessary, without requiring + // AWS credentials to view the actual state. + AnnotationAWSPolicyLastApplied string = "cloudcredential.openshift.io/aws-policy-last-applied" + + // CloudCredOperatorNamespace is the namespace where the credentials operator runs. + CloudCredOperatorNamespace = "openshift-cloud-credential-operator" + + // CloudCredOperatorConfigMap is an optional ConfigMap that can be used to alter behavior of the operator. + CloudCredOperatorConfigMap = "cloud-credential-operator-config" +) + +// NOTE: Run "make" to regenerate code after modifying this file + +// CredentialsRequestSpec defines the desired state of CredentialsRequest +type CredentialsRequestSpec struct { + // SecretRef points to the secret where the credentials should be stored once generated. + SecretRef corev1.ObjectReference `json:"secretRef"` + + // ProviderSpec contains the cloud provider specific credentials specification. 
+ ProviderSpec *runtime.RawExtension `json:"providerSpec,omitempty"` +} + +// CredentialsRequestStatus defines the observed state of CredentialsRequest +type CredentialsRequestStatus struct { + // Provisioned is true once the credentials have been initially provisioned. + Provisioned bool `json:"provisioned"` + + // LastSyncTimestamp is the time that the credentials were last synced. + LastSyncTimestamp *metav1.Time `json:"lastSyncTimestamp,omitempty"` + + // LastSyncGeneration is the generation of the credentials request resource + // that was last synced. Used to determine if the object has changed and + // requires a sync. + LastSyncGeneration int64 `json:"lastSyncGeneration"` + + // ProviderStatus contains cloud provider specific status. + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` + + // Conditions includes detailed status for the CredentialsRequest + // +optional + Conditions []CredentialsRequestCondition `json:"conditions,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CredentialsRequest is the Schema for the credentialsrequests API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +type CredentialsRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CredentialsRequestSpec `json:"spec"` + Status CredentialsRequestStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CredentialsRequestList contains a list of CredentialsRequest +type CredentialsRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CredentialsRequest `json:"items"` +} + +// CredentialsRequestCondition contains details for any of the conditions on a CredentialsRequest object +type CredentialsRequestCondition struct { + // Type is the specific type of the condition + Type CredentialsRequestConditionType `json:"type"` + // 
 Status is the status of the condition + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about the last transition + Message string `json:"message,omitempty"` +} + +// CredentialsRequestConditionType are the valid condition types for a CredentialsRequest +type CredentialsRequestConditionType string + +// These are valid conditions for a CredentialsRequest +const ( + // InsufficientCloudCredentials is true when the cloud credentials are deemed to be insufficient + // to either mint custom creds to satisfy the CredentialsRequest or insufficient to + // be able to be passed along as-is to satisfy the CredentialsRequest + InsufficientCloudCredentials CredentialsRequestConditionType = "InsufficientCloudCreds" + // MissingTargetNamespace is true when the namespace specified to hold the resulting + // credentials is not present + MissingTargetNamespace CredentialsRequestConditionType = "MissingTargetNamespace" + // CredentialsProvisionFailure is true whenever there has been an issue while trying + // to provision the credentials (either passthrough or minting). Error message will + // be stored directly in the condition message. 
+ CredentialsProvisionFailure CredentialsRequestConditionType = "CredentialsProvisionFailure" + // CredentialsDeprovisionFailure is true whenever there is an error when trying + // to clean up any previously-created cloud resources + CredentialsDeprovisionFailure CredentialsRequestConditionType = "CredentialsDeprovisionFailure" + // Ignored is true when the CredentialsRequest's ProviderSpec is for + // a different infrastructure platform than what the cluster has been + // deployed to. This is normal as the release image contains CredentialsRequests for all + // possible clouds/infrastructure, and cloud-credential-operator will only act on the + // CredentialsRequests where the cloud/infra matches. + Ignored CredentialsRequestConditionType = "Ignored" +) + +func init() { + SchemeBuilder.Register( + &CredentialsRequest{}, &CredentialsRequestList{}, + &AWSProviderStatus{}, &AWSProviderSpec{}, + &AzureProviderStatus{}, &AzureProviderSpec{}, + &GCPProviderStatus{}, &GCPProviderSpec{}, + &VSphereProviderStatus{}, &VSphereProviderSpec{}, + ) +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/doc.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/doc.go new file mode 100644 index 00000000000..9b77a72e6f2 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the cloudcredential v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential +// +k8s:defaulter-gen=TypeMeta +// +groupName=cloudcredential.openshift.io +package v1 diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/gcp_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/gcp_types.go new file mode 100644 index 00000000000..32f88970944 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/gcp_types.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO: these types should eventually be broken out, along with the actuator, to a separate repo. + +// GCPProviderSpec contains the required information to create a service account with policy bindings in GCP. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type GCPProviderSpec struct { + metav1.TypeMeta `json:",inline"` + // PredefinedRoles is the list of GCP pre-defined roles + // that the CredentialsRequest requires. 
+ PredefinedRoles []string `json:"predefinedRoles"` + // SkipServiceCheck can be set to true to skip the check whether the requested roles + // have the necessary services enabled + // +optional + SkipServiceCheck bool `json:"skipServiceCheck,omitempty"` +} + +// GCPProviderStatus contains the status of the GCP credentials request. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type GCPProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // ServiceAccountID is the ID of the service account created in GCP for the requested credentials. + ServiceAccountID string `json:"serviceAccountID"` +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/openstack_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/openstack_types.go new file mode 100644 index 00000000000..d83ffb7b271 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/openstack_types.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO: these types should eventually be broken out, along with the actuator, +// to a separate repo. + +// OpenStackProviderSpec the specification of the credentials request in OpenStack. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type OpenStackProviderSpec struct { + metav1.TypeMeta `json:",inline"` +} + +// OpenStackProviderStatus contains the status of the credentials request in OpenStack. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type OpenStackProviderStatus struct { + metav1.TypeMeta `json:",inline"` +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/ovirt_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/ovirt_types.go new file mode 100644 index 00000000000..c88b75f82b3 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/ovirt_types.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO: these types should eventually be broken out, along with the actuator, +// to a separate repo. + +// OvirtProviderSpec the specification of the credentials request in Ovirt. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type OvirtProviderSpec struct { + metav1.TypeMeta `json:",inline"` +} + +// OvirtProviderStatus contains the status of the credentials request in Ovirt. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type OvirtProviderStatus struct { + metav1.TypeMeta `json:",inline"` +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/register.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/register.go new file mode 100644 index 00000000000..36bc3a66f50 --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the cloudcredential v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential +// +k8s:defaulter-gen=TypeMeta +// +groupName=cloudcredential.openshift.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "cloudcredential.openshift.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... 
+ AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/vsphere_types.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/vsphere_types.go new file mode 100644 index 00000000000..062ee454a1e --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/vsphere_types.go @@ -0,0 +1,61 @@ +/* +Copyright 2020 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO: these types should eventually be broken out, along with the actuator, +// to a separate repo. + +// VSphereProviderSpec contains the required information to create RBAC role +// bindings for VSphere. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VSphereProviderSpec struct { + metav1.TypeMeta `json:",inline"` + + // Permissions contains a list of groups of privileges that are being requested. + Permissions []VSpherePermission `json:"permissions"` +} + +// VSpherePermission captures the details of the privileges being requested for the list of entities. +type VSpherePermission struct { + // Privileges is the list of access being requested. 
+ Privileges []string `json:"privileges"` + + // TODO: when implementing mint-mode will need to figure out how to allow + // a CredentialsRequest to indicate that the above list of privileges should + // be bound to a specific scope(s) (eg Storage, Hosts/Clusters, Networking, Global, etc). + // Entities is the list of entities for which the list of permissions should be granted + // access to. + // Entities []string `json:"entities"` + + // Also will need to allow specifying whether permissions should "Propagate to children". + // Propagate bool `json:"propagate"` +} + +// VSphereProviderStatus contains the status of the credentials request in VSphere. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VSphereProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // SecretLastResourceVersion is the resource version of the secret resource + // that was last synced. Used to determine if the object has changed and + // requires a sync. + SecretLastResourceVersion string `json:"secretLastResourceVersion"` +} diff --git a/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..122f310329e --- /dev/null +++ b/vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/zz_generated.deepcopy.go @@ -0,0 +1,539 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The OpenShift Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by main. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSProviderSpec) DeepCopyInto(out *AWSProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.StatementEntries != nil { + in, out := &in.StatementEntries, &out.StatementEntries + *out = make([]StatementEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSProviderSpec. +func (in *AWSProviderSpec) DeepCopy() *AWSProviderSpec { + if in == nil { + return nil + } + out := new(AWSProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSProviderStatus) DeepCopyInto(out *AWSProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSProviderStatus. +func (in *AWSProviderStatus) DeepCopy() *AWSProviderStatus { + if in == nil { + return nil + } + out := new(AWSProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AWSProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureProviderSpec) DeepCopyInto(out *AzureProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.RoleBindings != nil { + in, out := &in.RoleBindings, &out.RoleBindings + *out = make([]RoleBinding, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureProviderSpec. +func (in *AzureProviderSpec) DeepCopy() *AzureProviderSpec { + if in == nil { + return nil + } + out := new(AzureProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureProviderStatus) DeepCopyInto(out *AzureProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureProviderStatus. +func (in *AzureProviderStatus) DeepCopy() *AzureProviderStatus { + if in == nil { + return nil + } + out := new(AzureProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CredentialsRequest) DeepCopyInto(out *CredentialsRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsRequest. +func (in *CredentialsRequest) DeepCopy() *CredentialsRequest { + if in == nil { + return nil + } + out := new(CredentialsRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CredentialsRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsRequestCondition) DeepCopyInto(out *CredentialsRequestCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsRequestCondition. +func (in *CredentialsRequestCondition) DeepCopy() *CredentialsRequestCondition { + if in == nil { + return nil + } + out := new(CredentialsRequestCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsRequestList) DeepCopyInto(out *CredentialsRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CredentialsRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsRequestList. 
+func (in *CredentialsRequestList) DeepCopy() *CredentialsRequestList { + if in == nil { + return nil + } + out := new(CredentialsRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CredentialsRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsRequestSpec) DeepCopyInto(out *CredentialsRequestSpec) { + *out = *in + out.SecretRef = in.SecretRef + if in.ProviderSpec != nil { + in, out := &in.ProviderSpec, &out.ProviderSpec + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsRequestSpec. +func (in *CredentialsRequestSpec) DeepCopy() *CredentialsRequestSpec { + if in == nil { + return nil + } + out := new(CredentialsRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsRequestStatus) DeepCopyInto(out *CredentialsRequestStatus) { + *out = *in + if in.LastSyncTimestamp != nil { + in, out := &in.LastSyncTimestamp, &out.LastSyncTimestamp + *out = (*in).DeepCopy() + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CredentialsRequestCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsRequestStatus. 
+func (in *CredentialsRequestStatus) DeepCopy() *CredentialsRequestStatus { + if in == nil { + return nil + } + out := new(CredentialsRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPProviderSpec) DeepCopyInto(out *GCPProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.PredefinedRoles != nil { + in, out := &in.PredefinedRoles, &out.PredefinedRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPProviderSpec. +func (in *GCPProviderSpec) DeepCopy() *GCPProviderSpec { + if in == nil { + return nil + } + out := new(GCPProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCPProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPProviderStatus) DeepCopyInto(out *GCPProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPProviderStatus. +func (in *GCPProviderStatus) DeepCopy() *GCPProviderStatus { + if in == nil { + return nil + } + out := new(GCPProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCPProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackProviderSpec) DeepCopyInto(out *OpenStackProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackProviderSpec. +func (in *OpenStackProviderSpec) DeepCopy() *OpenStackProviderSpec { + if in == nil { + return nil + } + out := new(OpenStackProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackProviderStatus) DeepCopyInto(out *OpenStackProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackProviderStatus. +func (in *OpenStackProviderStatus) DeepCopy() *OpenStackProviderStatus { + if in == nil { + return nil + } + out := new(OpenStackProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtProviderSpec) DeepCopyInto(out *OvirtProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtProviderSpec. 
+func (in *OvirtProviderSpec) DeepCopy() *OvirtProviderSpec { + if in == nil { + return nil + } + out := new(OvirtProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OvirtProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtProviderStatus) DeepCopyInto(out *OvirtProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtProviderStatus. +func (in *OvirtProviderStatus) DeepCopy() *OvirtProviderStatus { + if in == nil { + return nil + } + out := new(OvirtProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OvirtProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatementEntry) DeepCopyInto(out *StatementEntry) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatementEntry. +func (in *StatementEntry) DeepCopy() *StatementEntry { + if in == nil { + return nil + } + out := new(StatementEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePermission) DeepCopyInto(out *VSpherePermission) { + *out = *in + if in.Privileges != nil { + in, out := &in.Privileges, &out.Privileges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePermission. +func (in *VSpherePermission) DeepCopy() *VSpherePermission { + if in == nil { + return nil + } + out := new(VSpherePermission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereProviderSpec) DeepCopyInto(out *VSphereProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]VSpherePermission, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereProviderSpec. +func (in *VSphereProviderSpec) DeepCopy() *VSphereProviderSpec { + if in == nil { + return nil + } + out := new(VSphereProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VSphereProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereProviderStatus) DeepCopyInto(out *VSphereProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereProviderStatus. +func (in *VSphereProviderStatus) DeepCopy() *VSphereProviderStatus { + if in == nil { + return nil + } + out := new(VSphereProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VSphereProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/client-go/dynamic/fake/simple.go b/vendor/k8s.io/client-go/dynamic/fake/simple.go new file mode 100644 index 00000000000..dee16b245a3 --- /dev/null +++ b/vendor/k8s.io/client-go/dynamic/fake/simple.go @@ -0,0 +1,493 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/testing" +) + +func NewSimpleDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) *FakeDynamicClient { + unstructuredScheme := runtime.NewScheme() + for gvk := range scheme.AllKnownTypes() { + if unstructuredScheme.Recognizes(gvk) { + continue + } + if strings.HasSuffix(gvk.Kind, "List") { + unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.UnstructuredList{}) + continue + } + unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.Unstructured{}) + } + + objects, err := convertObjectsToUnstructured(scheme, objects) + if err != nil { + panic(err) + } + + for _, obj := range objects { + gvk := obj.GetObjectKind().GroupVersionKind() + if !unstructuredScheme.Recognizes(gvk) { + unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.Unstructured{}) + } + gvk.Kind += "List" + if !unstructuredScheme.Recognizes(gvk) { + unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.UnstructuredList{}) + } + } + + return NewSimpleDynamicClientWithCustomListKinds(unstructuredScheme, nil, objects...) +} + +// NewSimpleDynamicClientWithCustomListKinds try not to use this. In general you want to have the scheme have the List types registered +// and allow the default guessing for resources match. Sometimes that doesn't work, so you can specify a custom mapping here. 
+func NewSimpleDynamicClientWithCustomListKinds(scheme *runtime.Scheme, gvrToListKind map[schema.GroupVersionResource]string, objects ...runtime.Object) *FakeDynamicClient { + // In order to use List with this client, you have to have your lists registered so that the object tracker will find them + // in the scheme to support the t.scheme.New(listGVK) call when it's building the return value. + // Since the base fake client needs the listGVK passed through the action (in cases where there are no instances, it + // cannot look up the actual hits), we need to know a mapping of GVR to listGVK here. For GETs and other types of calls, + // there is no return value that contains a GVK, so it doesn't have to know the mapping in advance. + + // first we attempt to invert known List types from the scheme to auto guess the resource with unsafe guesses + // this covers common usage of registering types in scheme and passing them + completeGVRToListKind := map[schema.GroupVersionResource]string{} + for listGVK := range scheme.AllKnownTypes() { + if !strings.HasSuffix(listGVK.Kind, "List") { + continue + } + nonListGVK := listGVK.GroupVersion().WithKind(listGVK.Kind[:len(listGVK.Kind)-4]) + plural, _ := meta.UnsafeGuessKindToResource(nonListGVK) + completeGVRToListKind[plural] = listGVK.Kind + } + + for gvr, listKind := range gvrToListKind { + if !strings.HasSuffix(listKind, "List") { + panic("coding error, listGVK must end in List or this fake client doesn't work right") + } + listGVK := gvr.GroupVersion().WithKind(listKind) + + // if we already have this type registered, just skip it + if _, err := scheme.New(listGVK); err == nil { + completeGVRToListKind[gvr] = listKind + continue + } + + scheme.AddKnownTypeWithName(listGVK, &unstructured.UnstructuredList{}) + completeGVRToListKind[gvr] = listKind + } + + codecs := serializer.NewCodecFactory(scheme) + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err 
!= nil { + panic(err) + } + } + + cs := &FakeDynamicClient{scheme: scheme, gvrToListKind: completeGVRToListKind, tracker: o} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type FakeDynamicClient struct { + testing.Fake + scheme *runtime.Scheme + gvrToListKind map[schema.GroupVersionResource]string + tracker testing.ObjectTracker +} + +type dynamicResourceClient struct { + client *FakeDynamicClient + namespace string + resource schema.GroupVersionResource + listKind string +} + +var ( + _ dynamic.Interface = &FakeDynamicClient{} + _ testing.FakeClient = &FakeDynamicClient{} +) + +func (c *FakeDynamicClient) Tracker() testing.ObjectTracker { + return c.tracker +} + +func (c *FakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { + return &dynamicResourceClient{client: c, resource: resource, listKind: c.gvrToListKind[resource]} +} + +func (c *dynamicResourceClient) Namespace(ns string) dynamic.ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewRootCreateAction(c.resource, obj), obj) + + case len(c.namespace) == 0 && len(subresources) > 0: + var accessor metav1.Object // avoid shadowing err + accessor, err = meta.Accessor(obj) + if err != nil { + return nil, err + } + name := accessor.GetName() + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), obj), obj) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewCreateAction(c.resource, c.namespace, obj), obj) + + case len(c.namespace) > 0 && len(subresources) > 0: + var accessor metav1.Object // avoid shadowing err + accessor, err = meta.Accessor(obj) + if err != nil { + return nil, err + } + name := accessor.GetName() + uncastRet, err = c.client.Fake. + Invokes(testing.NewCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateAction(c.resource, obj), obj) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), obj), obj) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewUpdateAction(c.resource, c.namespace, obj), obj) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(c.resource, "status", obj), obj) + + case len(c.namespace) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateSubresourceAction(c.resource, "status", c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + _, err = c.client.Fake. + Invokes(testing.NewRootDeleteAction(c.resource, name), &metav1.Status{Status: "dynamic delete fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + _, err = c.client.Fake. + Invokes(testing.NewRootDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic delete fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + _, err = c.client.Fake. 
+ Invokes(testing.NewDeleteAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic delete fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + _, err = c.client.Fake. + Invokes(testing.NewDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, name), &metav1.Status{Status: "dynamic delete fail"}) + } + + return err +} + +func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var err error + switch { + case len(c.namespace) == 0: + action := testing.NewRootDeleteCollectionAction(c.resource, listOptions) + _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"}) + + case len(c.namespace) > 0: + action := testing.NewDeleteCollectionAction(c.resource, c.namespace, listOptions) + _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"}) + + } + + return err +} + +func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootGetAction(c.resource, name), &metav1.Status{Status: "dynamic get fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootGetSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewGetAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic get fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewGetSubresourceAction(c.resource, c.namespace, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"}) + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if len(c.listKind) == 0 { + panic(fmt.Sprintf("coding error: you must register resource to list kind for every resource you're going to LIST when creating the client. See NewSimpleDynamicClientWithCustomListKinds or register the list into the scheme: %v out of %v", c.resource, c.client.gvrToListKind)) + } + listGVK := c.resource.GroupVersion().WithKind(c.listKind) + listForFakeClientGVK := c.resource.GroupVersion().WithKind(c.listKind[:len(c.listKind)-4]) /*base library appends List*/ + + var obj runtime.Object + var err error + switch { + case len(c.namespace) == 0: + obj, err = c.client.Fake. + Invokes(testing.NewRootListAction(c.resource, listForFakeClientGVK, opts), &metav1.Status{Status: "dynamic list fail"}) + + case len(c.namespace) > 0: + obj, err = c.client.Fake. 
+ Invokes(testing.NewListAction(c.resource, listForFakeClientGVK, c.namespace, opts), &metav1.Status{Status: "dynamic list fail"}) + + } + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + + retUnstructured := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(obj, retUnstructured, nil); err != nil { + return nil, err + } + entireList, err := retUnstructured.ToList() + if err != nil { + return nil, err + } + + list := &unstructured.UnstructuredList{} + list.SetResourceVersion(entireList.GetResourceVersion()) + list.GetObjectKind().SetGroupVersionKind(listGVK) + for i := range entireList.Items { + item := &entireList.Items[i] + metadata, err := meta.Accessor(item) + if err != nil { + return nil, err + } + if label.Matches(labels.Set(metadata.GetLabels())) { + list.Items = append(list.Items, *item) + } + } + return list, nil +} + +func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + switch { + case len(c.namespace) == 0: + return c.client.Fake. + InvokesWatch(testing.NewRootWatchAction(c.resource, opts)) + + case len(c.namespace) > 0: + return c.client.Fake. + InvokesWatch(testing.NewWatchAction(c.resource, c.namespace, opts)) + + } + + panic("math broke") +} + +// TODO: opts are currently ignored. +func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootPatchAction(c.resource, name, pt, data), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewPatchAction(c.resource, c.namespace, name, pt, data), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"}) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func convertObjectsToUnstructured(s *runtime.Scheme, objs []runtime.Object) ([]runtime.Object, error) { + ul := make([]runtime.Object, 0, len(objs)) + + for _, obj := range objs { + u, err := convertToUnstructured(s, obj) + if err != nil { + return nil, err + } + + ul = append(ul, u) + } + return ul, nil +} + +func convertToUnstructured(s *runtime.Scheme, obj runtime.Object) (runtime.Object, error) { + var ( + err error + u unstructured.Unstructured + ) + + u.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, fmt.Errorf("failed to convert to unstructured: %w", err) + } + + gvk := u.GroupVersionKind() + if gvk.Group == "" || gvk.Kind == "" { + gvks, _, err := s.ObjectKinds(obj) + if err != nil { + return nil, fmt.Errorf("failed to convert to unstructured - unable to get GVK %w", err) + } + apiv, k := gvks[0].ToAPIVersionAndKind() + u.SetAPIVersion(apiv) + u.SetKind(k) + } + return &u, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f0526624270..39f3bf63246 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1047,6 +1047,9 @@ 
github.com/openshift/client-go/samples/clientset/versioned/typed/samples/v1 github.com/openshift/client-go/security/clientset/versioned github.com/openshift/client-go/security/clientset/versioned/scheme github.com/openshift/client-go/security/clientset/versioned/typed/security/v1 +# github.com/openshift/cloud-credential-operator v0.0.0-00010101000000-000000000000 => github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e +## explicit +github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1 # github.com/openshift/console-operator v0.0.0-20220407014945-45d37e70e0c2 => github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315 ## explicit; go 1.16 github.com/openshift/console-operator/pkg/api @@ -1726,6 +1729,7 @@ k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/disk k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic +k8s.io/client-go/dynamic/fake k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1