From 41a6e7c1e008da1b3ceee74f0a782ea4a397db09 Mon Sep 17 00:00:00 2001
From: Amit Arora
Date: Wed, 21 Aug 2024 21:30:54 +0530
Subject: [PATCH] Metrics for SyncSet and SelectorSyncSets merging 8659 and 9545

Metrics for SyncSet and SelectorSyncSets
---
 go.mod | 2 +-
 pkg/frontend/admin_hive_syncset_resources.go | 65 +++
 .../admin_hive_syncset_resources_test.go | 112 ++++++
 pkg/frontend/frontend.go | 2 +
 pkg/hive/manager.go | 19 +
 pkg/hive/manager_test.go | 50 +++
 pkg/monitor/cluster/cluster.go | 30 +-
 pkg/monitor/cluster/clustersync.go | 69 ++++
 pkg/monitor/worker.go | 11 +-
 pkg/util/mocks/hive/hive.go | 16 +
 pkg/util/scheme/scheme.go | 2 +
 test/e2e/monitor.go | 4 +-
 .../v1alpha1/clustersync_types.go | 163 ++++++++
 .../v1alpha1/clustersynclease_types.go | 37 ++
 .../hive/apis/hiveinternal/v1alpha1/doc.go | 7 +
 .../v1alpha1/fakeclusterinstall_types.go | 56 +++
 .../apis/hiveinternal/v1alpha1/register.go | 36 ++
 .../v1alpha1/zz_generated.deepcopy.go | 378 ++++++++++++++++++
 vendor/modules.txt | 3 +-
 19 files changed, 1046 insertions(+), 16 deletions(-)
 create mode 100644 pkg/frontend/admin_hive_syncset_resources.go
 create mode 100644 pkg/frontend/admin_hive_syncset_resources_test.go
 create mode 100644 pkg/monitor/cluster/clustersync.go
 create mode 100644 vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersync_types.go
 create mode 100644 vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersynclease_types.go
 create mode 100644 vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/fakeclusterinstall_types.go
 create mode 100644 vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go

diff --git a/go.mod b/go.mod
index e0a9131ecab..c3a3fd415f0 100644
--- a/go.mod
+++ b/go.mod
@@ -59,7 +59,7 @@ require (
 	github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46
 	github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c
 	github.com/openshift/cloud-credential-operator v0.0.0-00010101000000-000000000000
-	github.com/openshift/hive/apis v0.0.0-20240812130639-bdf9d08a060a
+	github.com/openshift/hive/apis v0.0.0-20240821011206-1ec27ad45d5a
 	github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc
 	github.com/openshift/machine-config-operator v0.0.1-0.20230519222939-1abc13efbb0d
 	github.com/pires/go-proxyproto v0.6.2
diff --git a/pkg/frontend/admin_hive_syncset_resources.go b/pkg/frontend/admin_hive_syncset_resources.go
new file mode 100644
index 00000000000..1e42aa8856f
--- /dev/null
+++ b/pkg/frontend/admin_hive_syncset_resources.go
@@ -0,0 +1,65 @@
+package frontend
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"net/http"
+	"path/filepath"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"github.com/ugorji/go/codec"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/frontend/middleware"
+)
+
+func (f *frontend) getAdminHiveSyncsetResources(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	log := ctx.Value(middleware.ContextKeyLog).(*logrus.Entry)
+	resourceID := strings.TrimPrefix(filepath.Dir(r.URL.Path), "/admin")
+	b, err := f._getAdminHiveSyncsetResources(ctx, resourceID)
+
+	if cloudErr, ok := err.(*api.CloudError); ok {
+		api.WriteCloudError(w, cloudErr)
+		return
+	}
+
+	adminReply(log, w, nil, b, err)
+}
+
+func (f *frontend) _getAdminHiveSyncsetResources(ctx context.Context, resourceID string) ([]byte, error) {
+	// Hive is not enabled in every environment, so check that the frontend has a valid cluster manager first.
+	if f.hiveClusterManager == nil {
+		return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", "hive is not enabled")
+	}
+
+	dbOpenShiftClusters, err := f.dbGroup.OpenShiftClusters()
+	if err != nil {
+		return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", err.Error())
+	}
+
+	doc, err := dbOpenShiftClusters.Get(ctx, resourceID)
+	if err != nil {
+		return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", "cluster not found")
+	}
+
+	if doc.OpenShiftCluster.Properties.HiveProfile.Namespace == "" {
+		return nil, api.NewCloudError(http.StatusNoContent, api.CloudErrorCodeResourceNotFound, "", "cluster is not managed by hive")
+	}
+
+	clusterSync, err := f.hiveClusterManager.GetSyncSetResources(ctx, doc)
+	if err != nil {
+		return nil, api.NewCloudError(http.StatusNotFound, api.CloudErrorCodeNotFound, "", "cluster sync not found")
+	}
+
+	var b []byte
+	err = codec.NewEncoderBytes(&b, &codec.JsonHandle{}).Encode(clusterSync)
+	if err != nil {
+		return nil, api.NewCloudError(http.StatusInternalServerError, api.CloudErrorCodeInternalServerError, "", "unable to marshal response")
+	}
+
+	return b, nil
+}
diff --git a/pkg/frontend/admin_hive_syncset_resources_test.go b/pkg/frontend/admin_hive_syncset_resources_test.go
new file mode 100644
index 00000000000..b32373b0832
--- /dev/null
+++ b/pkg/frontend/admin_hive_syncset_resources_test.go
@@ -0,0 +1,112 @@
+package frontend
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/openshift/hive/apis/hiveinternal/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/metrics/noop"
+	mock_env "github.com/Azure/ARO-RP/pkg/util/mocks/env"
+	mock_hive "github.com/Azure/ARO-RP/pkg/util/mocks/hive"
+)
+
+func TestGetAdminHiveSyncsetResources(t *testing.T) {
+	fakeNamespace := "aro-00000000-0000-0000-0000-000000000000"
+	ctx := context.Background()
+	clusterSyncsetTest := &v1alpha1.ClusterSync{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "clustersync1",
+			Namespace: fakeNamespace,
+		},
+	}
+
+	type test struct {
+		name           string
+		namespace      string
+		hiveEnabled    bool
+		mocks          func(*test, *mock_hive.MockClusterManager)
+		wantStatusCode int
+		wantResponse   []byte
+		wantError      string
+	}
+
+	for _, tt := range []*test{
+		{
+			name:           "Cluster SyncSets must be namespaced",
+			namespace:      "",
+			hiveEnabled:    true,
+			mocks:          func(tt *test, s *mock_hive.MockClusterManager) {},
+			wantStatusCode: http.StatusInternalServerError,
+			wantError:      "500: InternalServerError: : hive is not enabled",
+		},
+		{
+			name:      "List ClusterSync resources successfully",
+			namespace: "hive",
+			wantError: "",
+			mocks: func(tt *test, s *mock_hive.MockClusterManager) {
+				s.EXPECT().
+					GetSyncSetResources(gomock.Any(), gomock.Any()).
+					Return(clusterSyncsetTest, nil).Times(1)
+			},
+			wantStatusCode: http.StatusOK,
+		},
+		{
+			name:           "Hive is not enabled",
+			namespace:      fakeNamespace,
+			mocks:          nil,
+			hiveEnabled:    false,
+			wantStatusCode: http.StatusInternalServerError,
+			wantError:      "500: InternalServerError: : hive is not enabled",
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			ti := newTestInfra(t).WithOpenShiftClusters().WithSubscriptions()
+			defer ti.done()
+
+			_env := ti.env.(*mock_env.MockInterface)
+			var f *frontend
+			var err error
+			if tt.hiveEnabled {
+				s := mock_hive.NewMockClusterManager(ti.controller)
+				tt.mocks(tt, s)
+				f, err = NewFrontend(ctx, ti.audit, ti.log, _env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, nil, nil, nil, nil, nil, nil)
+			} else {
+				f, err = NewFrontend(ctx, ti.audit, ti.log, _env, ti.dbGroup, api.APIs, &noop.Noop{}, &noop.Noop{}, nil, nil, nil, nil, nil, nil)
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			clusterSyncSet, err := f._getAdminHiveSyncsetResources(ctx, tt.namespace)
+			cloudErr, isCloudErr := err.(*api.CloudError)
+			if tt.wantError != "" && isCloudErr && cloudErr != nil {
+				if tt.wantError != cloudErr.Error() {
+					t.Fatalf("got %q but wanted %q", cloudErr.Error(), tt.wantError)
+				}
+				if tt.wantStatusCode != 0 && tt.wantStatusCode != cloudErr.StatusCode {
+					t.Fatalf("got status code %d but wanted %d", cloudErr.StatusCode, tt.wantStatusCode)
+				}
+			}
+
+			if !strings.EqualFold(string(clusterSyncSet), string(tt.wantResponse)) {
+				t.Fatalf("got %q and expected %q", clusterSyncSet, tt.wantResponse)
+			}
+		})
+	}
+}
diff --git a/pkg/frontend/frontend.go b/pkg/frontend/frontend.go
index 83e9d97b660..32fcd0c372b 100644
--- a/pkg/frontend/frontend.go
+++ b/pkg/frontend/frontend.go
@@ -307,6 +307,8 @@ func (f *frontend) chiAuthenticatedRoutes(router chi.Router) {
 
 				r.Get("/clusterdeployment", f.getAdminHiveClusterDeployment)
 
+				r.Get("/clustersync", f.getAdminHiveSyncsetResources)
+
 				r.With(f.maintenanceMiddleware.UnplannedMaintenanceSignal).Post("/redeployvm", f.postAdminOpenShiftClusterRedeployVM)
 				r.With(f.maintenanceMiddleware.UnplannedMaintenanceSignal).Post("/stopvm", f.postAdminOpenShiftClusterStopVM)
diff --git a/pkg/hive/manager.go b/pkg/hive/manager.go
index 920bbf53af4..b1c7be046ba 100644
--- a/pkg/hive/manager.go
+++ b/pkg/hive/manager.go
@@ -7,9 +7,11 @@ import (
 	"context"
 	"errors"
 	"fmt"
 	"sort"
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	hivev1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
 	"github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -42,6 +44,7 @@ type ClusterManager interface {
 	IsClusterInstallationComplete(ctx context.Context, doc *api.OpenShiftClusterDocument) (bool, error)
 	GetClusterDeployment(ctx context.Context, doc *api.OpenShiftClusterDocument) (*hivev1.ClusterDeployment, error)
 	ResetCorrelationData(ctx context.Context, doc *api.OpenShiftClusterDocument) error
+	GetSyncSetResources(ctx context.Context, doc *api.OpenShiftClusterDocument) (*hivev1alpha1.ClusterSync, error)
 }
 
 type clusterManager struct {
@@ -262,3 +265,19 @@ func (hr *clusterManager) installLogsForLatestDeployment(ctx context.Context, cd
 
 	return latestProvision.Spec.InstallLog, nil
 }
+
+func (hr *clusterManager) GetSyncSetResources(ctx context.Context, doc *api.OpenShiftClusterDocument) (*hivev1alpha1.ClusterSync, error) {
+	clusterSync := &hivev1alpha1.ClusterSync{}
+
+	key := client.ObjectKey{
+		Name:      ClusterDeploymentName,
+		Namespace: doc.OpenShiftCluster.Properties.HiveProfile.Namespace,
+	}
+
+	err := hr.hiveClientset.Get(ctx, key, clusterSync)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get ClusterSync for namespace %s: %w", key.Namespace, err)
+	}
+
+	return clusterSync, nil
+}
diff --git a/pkg/hive/manager_test.go b/pkg/hive/manager_test.go
index 14518d6e867..cf7fb1bb112 100644
--- a/pkg/hive/manager_test.go
+++ b/pkg/hive/manager_test.go
@@ -10,6 +10,7 @@ import (
 	"testing"
 
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	hivev1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
 	"github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -548,3 +549,52 @@ func TestGetClusterDeployment(t *testing.T) {
 		})
 	}
 }
+
+func TestGetClusterSyncForClusterDeployment(t *testing.T) {
+	fakeNamespace := "aro-00000000-0000-0000-0000-000000000000"
+	doc := &api.OpenShiftClusterDocument{
+		OpenShiftCluster: &api.OpenShiftCluster{
+			Properties: api.OpenShiftClusterProperties{
+				HiveProfile: api.HiveProfile{
+					Namespace: fakeNamespace,
+				},
+			},
+		},
+	}
+
+	cs := &hivev1alpha1.ClusterSync{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      ClusterDeploymentName,
+			Namespace: fakeNamespace,
+		},
+	}
+
+	for _, tt := range []struct {
+		name    string
+		wantErr string
+	}{
+		{name: "syncset exists and returned"},
+		{name: "selectorsyncsets exists and returned"},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			fakeClientBuilder := fake.NewClientBuilder()
+			if tt.wantErr == "" {
+				fakeClientBuilder = fakeClientBuilder.WithRuntimeObjects(cs)
+			}
+			c := clusterManager{
+				hiveClientset: fakeClientBuilder.Build(),
+				log:           logrus.NewEntry(logrus.StandardLogger()),
+			}
+
+			result, err := c.GetSyncSetResources(context.Background(), doc)
+			if err != nil && err.Error() != tt.wantErr ||
+				err == nil && tt.wantErr != "" {
+				t.Fatal(err)
+			}
+
+			if result != nil && (result.Name != cs.Name || result.Namespace != cs.Namespace) {
+				t.Fatal("Unexpected cluster sync returned", result)
+			}
+		})
+	}
+}
diff --git a/pkg/monitor/cluster/cluster.go b/pkg/monitor/cluster/cluster.go
index d09d46f78e3..f8cb39d2780 100644
--- a/pkg/monitor/cluster/cluster.go
+++ b/pkg/monitor/cluster/cluster.go
@@ -22,6 +22,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 
 	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/hive"
 	"github.com/Azure/ARO-RP/pkg/metrics"
 	"github.com/Azure/ARO-RP/pkg/monitor/dimension"
 	"github.com/Azure/ARO-RP/pkg/monitor/emitter"
@@ -60,10 +61,12 @@ type Monitor struct {
 		arodl *appsv1.DeploymentList
 	}
 
-	wg *sync.WaitGroup
+	wg                 *sync.WaitGroup
+	hiveClusterManager hive.ClusterManager
+	doc                *api.OpenShiftClusterDocument
 }
 
-func NewMonitor(log *logrus.Entry, restConfig *rest.Config, oc *api.OpenShiftCluster, m metrics.Emitter, hiveRestConfig *rest.Config, hourlyRun bool, wg *sync.WaitGroup) (*Monitor, error) {
+func NewMonitor(log *logrus.Entry, restConfig *rest.Config, oc *api.OpenShiftCluster, doc *api.OpenShiftClusterDocument, m metrics.Emitter, hiveRestConfig *rest.Config, hourlyRun bool, wg *sync.WaitGroup, hiveClusterManager hive.ClusterManager) (*Monitor, error) {
 	r, err := azure.ParseResourceID(oc.ID)
 	if err != nil {
 		return nil, err
@@ -126,16 +129,18 @@ func NewMonitor(log *logrus.Entry, restConfig *rest.Config, oc *api.OpenShiftClu
 		oc:   oc,
 		dims: dims,
 
-		restconfig:    restConfig,
-		cli:           cli,
-		configcli:     configcli,
-		maocli:        maocli,
-		mcocli:        mcocli,
-		arocli:        arocli,
-		m:             m,
-		ocpclientset:  ocpclientset,
-		hiveclientset: hiveclientset,
-		wg:            wg,
+		restconfig:         restConfig,
+		cli:                cli,
+		configcli:          configcli,
+		maocli:             maocli,
+		mcocli:             mcocli,
+		arocli:             arocli,
+		m:                  m,
+		ocpclientset:       ocpclientset,
+		hiveclientset:      hiveclientset,
+		wg:                 wg,
+		hiveClusterManager: hiveClusterManager,
+		doc:                doc,
 	}, nil
 }
@@ -208,6 +213,7 @@ func (mon *Monitor) Monitor(ctx context.Context) (errs []error) {
 		mon.emitJobConditions,
 		mon.emitSummary,
 		mon.emitHiveRegistrationStatus,
+		mon.emitClusterSync,
 		mon.emitOperatorFlagsAndSupportBanner,
 		mon.emitMaintenanceState,
 		mon.emitCertificateExpirationStatuses,
diff --git a/pkg/monitor/cluster/clustersync.go b/pkg/monitor/cluster/clustersync.go
new file mode 100644
index 00000000000..291d2e6b52a
--- /dev/null
+++ b/pkg/monitor/cluster/clustersync.go
@@ -0,0 +1,69 @@
+package cluster
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+
+	"github.com/sirupsen/logrus"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func (mon *Monitor) emitClusterSync(ctx context.Context) error {
+	// Hive is not enabled in every environment; skip emitting when there is no cluster manager.
+	if mon.hiveClusterManager == nil {
+		return nil
+	}
+
+	clusterSync, err := mon.hiveClusterManager.GetSyncSetResources(ctx, mon.doc)
+	if err != nil {
+		return err
+	}
+
+	if clusterSync != nil {
+		if clusterSync.Status.SyncSets != nil {
+			for _, s := range clusterSync.Status.SyncSets {
+				mon.emitGauge("hive.clustersync", 1, map[string]string{
+					"metric": "SyncSets",
+					"name":   s.Name,
+					"result": string(s.Result),
+				})
+
+				if mon.hourlyRun {
+					mon.log.WithFields(logrus.Fields{
+						"metric":             "SyncSets",
+						"name":               s.Name,
+						"result":             string(s.Result),
+						"firstSuccessTime":   timeToString(s.FirstSuccessTime),
+						"lastTransitionTime": timeToString(&s.LastTransitionTime),
+						"failureMessage":     s.FailureMessage,
+					}).Print()
+				}
+			}
+		}
+		if clusterSync.Status.SelectorSyncSets != nil {
+			for _, s := range clusterSync.Status.SelectorSyncSets {
+				mon.emitGauge("hive.clustersync", 1, map[string]string{
+					"metric": "SelectorSyncSets",
+					"name":   s.Name,
+					"result": string(s.Result),
+				})
+
+				if mon.hourlyRun {
+					mon.log.WithFields(logrus.Fields{
+						"metric":             "SelectorSyncSets",
+						"name":               s.Name,
+						"result":             string(s.Result),
+						"firstSuccessTime":   timeToString(s.FirstSuccessTime),
+						"lastTransitionTime": timeToString(&s.LastTransitionTime),
+						"failureMessage":     s.FailureMessage,
+					}).Print()
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func timeToString(t *metav1.Time) string {
+	if t == nil {
+		return ""
+	}
+	return t.String()
+}
diff --git a/pkg/monitor/worker.go b/pkg/monitor/worker.go
index d10b05b081d..37bb5777f0d 100644
--- a/pkg/monitor/worker.go
+++ b/pkg/monitor/worker.go
@@ -15,6 +15,8 @@ import (
 	"k8s.io/client-go/rest"
 
 	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/env"
+	"github.com/Azure/ARO-RP/pkg/hive"
 	"github.com/Azure/ARO-RP/pkg/monitor/azure/nsg"
 	"github.com/Azure/ARO-RP/pkg/monitor/cluster"
 	"github.com/Azure/ARO-RP/pkg/monitor/dimension"
@@ -281,9 +283,16 @@ func (mon *monitor) workOne(ctx context.Context, log *logrus.Entry, doc *api.Ope
 	var monitors []monitoring.Monitor
 	var wg sync.WaitGroup
 
+	_env, err := env.NewEnv(ctx, log, env.COMPONENT_MONITOR)
+	if err != nil {
+		log.Error(err)
+		return
+	}
+	hiveClusterManager, err := hive.NewFromConfig(log, _env, hiveRestConfig)
+	if err != nil {
+		log.Error(err)
+	}
+
 	nsgMon := nsg.NewMonitor(log, doc.OpenShiftCluster, mon.env, sub.ID, sub.Subscription.Properties.TenantID, mon.clusterm, dims, &wg, nsgMonTicker.C)
 
-	c, err := cluster.NewMonitor(log, restConfig, doc.OpenShiftCluster, mon.clusterm, hiveRestConfig, hourlyRun, &wg)
+	c, err := cluster.NewMonitor(log, restConfig, doc.OpenShiftCluster, doc, mon.clusterm, hiveRestConfig, hourlyRun, &wg, hiveClusterManager)
 	if err != nil {
 		log.Error(err)
 		mon.m.EmitGauge("monitor.cluster.failedworker", 1, map[string]string{
diff --git a/pkg/util/mocks/hive/hive.go b/pkg/util/mocks/hive/hive.go
index dd6e22e2715..61fe880ea13 100644
--- a/pkg/util/mocks/hive/hive.go
+++ b/pkg/util/mocks/hive/hive.go
@@ -10,6 +10,7 @@ import (
 	gomock "github.com/golang/mock/gomock"
 	v1 "github.com/openshift/hive/apis/hive/v1"
+	v1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
 	v10 "k8s.io/api/core/v1"
 
 	api "github.com/Azure/ARO-RP/pkg/api"
@@ -96,6 +97,21 @@ func (mr *MockClusterManagerMockRecorder) GetClusterDeployment(arg0, arg1 interf
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterDeployment", reflect.TypeOf((*MockClusterManager)(nil).GetClusterDeployment), arg0, arg1)
 }
 
+// GetSyncSetResources mocks base method.
+func (m *MockClusterManager) GetSyncSetResources(arg0 context.Context, arg1 *api.OpenShiftClusterDocument) (*v1alpha1.ClusterSync, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetSyncSetResources", arg0, arg1)
+	ret0, _ := ret[0].(*v1alpha1.ClusterSync)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetSyncSetResources indicates an expected call of GetSyncSetResources.
+func (mr *MockClusterManagerMockRecorder) GetSyncSetResources(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSyncSetResources", reflect.TypeOf((*MockClusterManager)(nil).GetSyncSetResources), arg0, arg1)
+}
+
 // Install mocks base method.
 func (m *MockClusterManager) Install(arg0 context.Context, arg1 *api.SubscriptionDocument, arg2 *api.OpenShiftClusterDocument, arg3 *api.OpenShiftVersion) error {
 	m.ctrl.T.Helper()
diff --git a/pkg/util/scheme/scheme.go b/pkg/util/scheme/scheme.go
index e2353034399..481e80cfa1e 100644
--- a/pkg/util/scheme/scheme.go
+++ b/pkg/util/scheme/scheme.go
@@ -14,6 +14,7 @@ import (
 	securityv1 "github.com/openshift/api/security/v1"
 	cloudcredentialv1 "github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1"
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	hivev1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
 	mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	appsv1 "k8s.io/api/apps/v1"
@@ -51,6 +52,7 @@ func init() {
 	utilruntime.Must(operatorv1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(cloudcredentialv1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(hivev1.AddToScheme(scheme.Scheme))
+	utilruntime.Must(hivev1alpha1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(imageregistryv1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(templatesv1.AddToScheme(scheme.Scheme))
 }
diff --git a/test/e2e/monitor.go b/test/e2e/monitor.go
index cb979ca217f..954be14cc44 100644
--- a/test/e2e/monitor.go
+++ b/test/e2e/monitor.go
@@ -23,7 +23,9 @@ var _ = Describe("Monitor", func() {
 		wg.Add(1)
 		mon, err := cluster.NewMonitor(log, clients.RestConfig, &api.OpenShiftCluster{
 			ID: resourceIDFromEnv(),
-		}, &noop.Noop{}, nil, true, &wg)
+		}, &api.OpenShiftClusterDocument{
+			ID: resourceIDFromEnv(),
+		}, &noop.Noop{}, nil, true, &wg, nil)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("running the monitor once")
diff --git a/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersync_types.go b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersync_types.go
new file mode 100644
index 00000000000..7f330d08947
--- /dev/null
+++ b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersync_types.go
@@ -0,0 +1,163 @@
+package v1alpha1
+
+import (
+	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterSync is the status of all of the SelectorSyncSets and SyncSets that apply to a ClusterDeployment.
+// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clustersyncs,shortName=csync,scope=Namespaced +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.conditions[0].reason` +// +kubebuilder:printcolumn:name="Message",type=string,priority=1,JSONPath=`.status.conditions[?(@.type=="Failed")].message` +type ClusterSync struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSyncSpec `json:"spec,omitempty"` + Status ClusterSyncStatus `json:"status,omitempty"` +} + +// ClusterSyncSpec defines the desired state of ClusterSync +type ClusterSyncSpec struct{} + +// ClusterSyncStatus defines the observed state of ClusterSync +type ClusterSyncStatus struct { + // SyncSets is the sync status of all of the SyncSets for the cluster. + // +optional + SyncSets []SyncStatus `json:"syncSets,omitempty"` + + // SelectorSyncSets is the sync status of all of the SelectorSyncSets for the cluster. + // +optional + SelectorSyncSets []SyncStatus `json:"selectorSyncSets,omitempty"` + + // Conditions is a list of conditions associated with syncing to the cluster. + // +optional + Conditions []ClusterSyncCondition `json:"conditions,omitempty"` + + // FirstSuccessTime is the time we first successfully applied all (selector)syncsets to a cluster. + // +optional + FirstSuccessTime *metav1.Time `json:"firstSuccessTime,omitempty"` + + // ControlledByReplica indicates which replica of the hive-clustersync StatefulSet is responsible + // for (the CD related to) this clustersync. Note that this value indicates the replica that most + // recently handled the ClusterSync. If the hive-clustersync statefulset is scaled up or down, the + // controlling replica can change, potentially causing logs to be spread across multiple pods. + ControlledByReplica *int64 `json:"controlledByReplica,omitempty"` +} + +// SyncStatus is the status of applying a specific SyncSet or SelectorSyncSet to the cluster. +type SyncStatus struct { + // Name is the name of the SyncSet or SelectorSyncSet. + Name string `json:"name"` + + // ObservedGeneration is the generation of the SyncSet or SelectorSyncSet that was last observed. + ObservedGeneration int64 `json:"observedGeneration"` + + // ResourcesToDelete is the list of resources in the cluster that should be deleted when the SyncSet or SelectorSyncSet + // is deleted or is no longer matched to the cluster. + // +optional + ResourcesToDelete []SyncResourceReference `json:"resourcesToDelete,omitempty"` + + // Result is the result of the last attempt to apply the SyncSet or SelectorSyncSet to the cluster. + Result SyncSetResult `json:"result"` + + // FailureMessage is a message describing why the SyncSet or SelectorSyncSet could not be applied. This is only + // set when Result is Failure. + // +optional + FailureMessage string `json:"failureMessage,omitempty"` + + // LastTransitionTime is the time when this status last changed. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // FirstSuccessTime is the time when the SyncSet or SelectorSyncSet was first successfully applied to the cluster. + // +optional + FirstSuccessTime *metav1.Time `json:"firstSuccessTime,omitempty"` +} + +// SyncResourceReference is a reference to a resource that is synced to a cluster via a SyncSet or SelectorSyncSet. +type SyncResourceReference struct { + // APIVersion is the Group and Version of the resource. + APIVersion string `json:"apiVersion"` + + // Kind is the Kind of the resource. 
+ // +optional + Kind string `json:"kind"` + + // Name is the name of the resource. + Name string `json:"name"` + + // Namespace is the namespace of the resource. + // +optional + Namespace string `json:"namespace,omitempty"` +} + +// SyncSetResult is the result of a sync attempt. +// +kubebuilder:validation:Enum=Success;Failure +type SyncSetResult string + +const ( + // SuccessSyncSetResult is the result when the SyncSet or SelectorSyncSet was applied successfully to the cluster. + SuccessSyncSetResult SyncSetResult = "Success" + + // FailureSyncSetResult is the result when there was an error when attempting to apply the SyncSet or SelectorSyncSet + // to the cluster + FailureSyncSetResult SyncSetResult = "Failure" +) + +// ClusterSyncCondition contains details for the current condition of a ClusterSync +type ClusterSyncCondition struct { + // Type is the type of the condition. + Type ClusterSyncConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about the last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterSyncConditionType is a valid value for ClusterSyncCondition.Type +type ClusterSyncConditionType string + +// ConditionType satisfies the generics.Condition interface +func (c ClusterSyncCondition) ConditionType() hivev1.ConditionType { + return c.Type +} + +// String satisfies the generics.ConditionType interface +func (t ClusterSyncConditionType) String() string { + return string(t) +} + +const ( + // ClusterSyncFailed is the type of condition used to indicate whether there are SyncSets or SelectorSyncSets which + // have not been applied due to an error. + ClusterSyncFailed ClusterSyncConditionType = "Failed" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSyncList contains a list of ClusterSync +type ClusterSyncList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterSync `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterSync{}, &ClusterSyncList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersynclease_types.go b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersynclease_types.go new file mode 100644 index 00000000000..aa0285c90ac --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersynclease_types.go @@ -0,0 +1,37 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSyncLease is a record of the last time that SyncSets and SelectorSyncSets were applied to a cluster. 
+// +k8s:openapi-gen=true +// +kubebuilder:resource:path=clustersyncleases,shortName=csl,scope=Namespaced +type ClusterSyncLease struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSyncLeaseSpec `json:"spec,omitempty"` +} + +// ClusterSyncLeaseSpec is the specification of a ClusterSyncLease. +type ClusterSyncLeaseSpec struct { + // RenewTime is the time when SyncSets and SelectorSyncSets were last applied to the cluster. + RenewTime metav1.MicroTime `json:"renewTime"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterSyncLeaseList contains a list of ClusterSyncLeases. +type ClusterSyncLeaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterSyncLease `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterSyncLease{}, &ClusterSyncLeaseList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/doc.go b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/doc.go new file mode 100644 index 00000000000..e967080c4a9 --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/doc.go @@ -0,0 +1,7 @@ +// Package v1alpha1 contains API Schema definitions for the hiveinternal v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/hive/apis/hiveinternal +// +k8s:defaulter-gen=TypeMeta +// +groupName=hiveinternal.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/fakeclusterinstall_types.go b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/fakeclusterinstall_types.go new file mode 100644 index 00000000000..bbc2598c9d0 --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/fakeclusterinstall_types.go @@ -0,0 +1,56 @@ +package v1alpha1 + +import ( + hivev1 "github.com/openshift/hive/apis/hive/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FakeClusterInstallSpec defines the desired state of the FakeClusterInstall. +type FakeClusterInstallSpec struct { + + // ImageSetRef is a reference to a ClusterImageSet. The release image specified in the ClusterImageSet will be used + // to install the cluster. + ImageSetRef hivev1.ClusterImageSetReference `json:"imageSetRef"` + + // ClusterDeploymentRef is a reference to the ClusterDeployment associated with this AgentClusterInstall. + ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"` + + // ClusterMetadata contains metadata information about the installed cluster. It should be populated once the cluster install is completed. (it can be populated sooner if desired, but Hive will not copy back to ClusterDeployment until the Installed condition goes True. + ClusterMetadata *hivev1.ClusterMetadata `json:"clusterMetadata,omitempty"` +} + +// FakeClusterInstallStatus defines the observed state of the FakeClusterInstall. +type FakeClusterInstallStatus struct { + // Conditions includes more detailed status for the cluster install. + // +optional + Conditions []hivev1.ClusterInstallCondition `json:"conditions,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FakeClusterInstall represents a fake request to provision an agent based cluster. 
+// +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +type FakeClusterInstall struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FakeClusterInstallSpec `json:"spec"` + Status FakeClusterInstallStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FakeClusterInstallList contains a list of FakeClusterInstall +type FakeClusterInstallList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FakeClusterInstall `json:"items"` +} + +func init() { + SchemeBuilder.Register(&FakeClusterInstall{}, &FakeClusterInstallList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/register.go b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/register.go new file mode 100644 index 00000000000..e2707527729 --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/register.go @@ -0,0 +1,36 @@ +// NOTE: Boilerplate only. Ignore this file. + +// Package v1alpha1 contains API Schema definitions for the hiveinternal v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/hive/apis/hiveinternal +// +k8s:defaulter-gen=TypeMeta +// +groupName=hiveinternal.openshift.io +package v1alpha1 + +import ( + "github.com/openshift/hive/apis/scheme" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // HiveInternalAPIGroup is the group that all hiveinternal objects belong to in the API server. + HiveInternalAPIGroup = "hiveinternal.openshift.io" + + // HiveInternalAPIVersion is the api version that all hiveinternal objects are currently at. + HiveInternalAPIVersion = "v1alpha1" + + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: HiveInternalAPIGroup, Version: HiveInternalAPIVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is a shortcut for SchemeBuilder.AddToScheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..62ba17c9349 --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,378 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "github.com/openshift/hive/apis/hive/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSync) DeepCopyInto(out *ClusterSync) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSync. 
+func (in *ClusterSync) DeepCopy() *ClusterSync { + if in == nil { + return nil + } + out := new(ClusterSync) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSync) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncCondition) DeepCopyInto(out *ClusterSyncCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncCondition. +func (in *ClusterSyncCondition) DeepCopy() *ClusterSyncCondition { + if in == nil { + return nil + } + out := new(ClusterSyncCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncLease) DeepCopyInto(out *ClusterSyncLease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLease. +func (in *ClusterSyncLease) DeepCopy() *ClusterSyncLease { + if in == nil { + return nil + } + out := new(ClusterSyncLease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncLease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncLeaseList) DeepCopyInto(out *ClusterSyncLeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterSyncLease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLeaseList. +func (in *ClusterSyncLeaseList) DeepCopy() *ClusterSyncLeaseList { + if in == nil { + return nil + } + out := new(ClusterSyncLeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncLeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncLeaseSpec) DeepCopyInto(out *ClusterSyncLeaseSpec) { + *out = *in + in.RenewTime.DeepCopyInto(&out.RenewTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLeaseSpec. +func (in *ClusterSyncLeaseSpec) DeepCopy() *ClusterSyncLeaseSpec { + if in == nil { + return nil + } + out := new(ClusterSyncLeaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSyncList) DeepCopyInto(out *ClusterSyncList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterSync, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncList. +func (in *ClusterSyncList) DeepCopy() *ClusterSyncList { + if in == nil { + return nil + } + out := new(ClusterSyncList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncSpec) DeepCopyInto(out *ClusterSyncSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncSpec. +func (in *ClusterSyncSpec) DeepCopy() *ClusterSyncSpec { + if in == nil { + return nil + } + out := new(ClusterSyncSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncStatus) DeepCopyInto(out *ClusterSyncStatus) { + *out = *in + if in.SyncSets != nil { + in, out := &in.SyncSets, &out.SyncSets + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SelectorSyncSets != nil { + in, out := &in.SelectorSyncSets, &out.SelectorSyncSets + *out = make([]SyncStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterSyncCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirstSuccessTime != nil { + in, out := &in.FirstSuccessTime, &out.FirstSuccessTime + *out = (*in).DeepCopy() + } + if in.ControlledByReplica != nil { + in, out := &in.ControlledByReplica, &out.ControlledByReplica + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncStatus. +func (in *ClusterSyncStatus) DeepCopy() *ClusterSyncStatus { + if in == nil { + return nil + } + out := new(ClusterSyncStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FakeClusterInstall) DeepCopyInto(out *FakeClusterInstall) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstall. +func (in *FakeClusterInstall) DeepCopy() *FakeClusterInstall { + if in == nil { + return nil + } + out := new(FakeClusterInstall) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FakeClusterInstall) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FakeClusterInstallList) DeepCopyInto(out *FakeClusterInstallList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FakeClusterInstall, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstallList. +func (in *FakeClusterInstallList) DeepCopy() *FakeClusterInstallList { + if in == nil { + return nil + } + out := new(FakeClusterInstallList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FakeClusterInstallList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FakeClusterInstallSpec) DeepCopyInto(out *FakeClusterInstallSpec) { + *out = *in + out.ImageSetRef = in.ImageSetRef + out.ClusterDeploymentRef = in.ClusterDeploymentRef + if in.ClusterMetadata != nil { + in, out := &in.ClusterMetadata, &out.ClusterMetadata + *out = new(v1.ClusterMetadata) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstallSpec. +func (in *FakeClusterInstallSpec) DeepCopy() *FakeClusterInstallSpec { + if in == nil { + return nil + } + out := new(FakeClusterInstallSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FakeClusterInstallStatus) DeepCopyInto(out *FakeClusterInstallStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.ClusterInstallCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstallStatus. +func (in *FakeClusterInstallStatus) DeepCopy() *FakeClusterInstallStatus { + if in == nil { + return nil + } + out := new(FakeClusterInstallStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncResourceReference) DeepCopyInto(out *SyncResourceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncResourceReference. +func (in *SyncResourceReference) DeepCopy() *SyncResourceReference { + if in == nil { + return nil + } + out := new(SyncResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyncStatus) DeepCopyInto(out *SyncStatus) { + *out = *in + if in.ResourcesToDelete != nil { + in, out := &in.ResourcesToDelete, &out.ResourcesToDelete + *out = make([]SyncResourceReference, len(*in)) + copy(*out, *in) + } + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + if in.FirstSuccessTime != nil { + in, out := &in.FirstSuccessTime, &out.FirstSuccessTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatus. +func (in *SyncStatus) DeepCopy() *SyncStatus { + if in == nil { + return nil + } + out := new(SyncStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 15df2b4041b..85f82da678a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1151,7 +1151,7 @@ github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1 # github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 ## explicit; go 1.12 github.com/openshift/custom-resource-status/conditions/v1 -# github.com/openshift/hive/apis v0.0.0-20240812130639-bdf9d08a060a => github.com/openshift/hive/apis v0.0.0-20231116161336-9dd47f8bfa1f +# github.com/openshift/hive/apis v0.0.0-20240821011206-1ec27ad45d5a => github.com/openshift/hive/apis v0.0.0-20231116161336-9dd47f8bfa1f ## explicit; go 1.20 github.com/openshift/hive/apis/hive/v1 github.com/openshift/hive/apis/hive/v1/agent @@ -1166,6 +1166,7 @@ github.com/openshift/hive/apis/hive/v1/none github.com/openshift/hive/apis/hive/v1/openstack github.com/openshift/hive/apis/hive/v1/ovirt github.com/openshift/hive/apis/hive/v1/vsphere +github.com/openshift/hive/apis/hiveinternal/v1alpha1 github.com/openshift/hive/apis/scheme # github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc => github.com/openshift/library-go v0.0.0-20230222114049-eac44a078a6e ## explicit; go 1.17