diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go
index 424ff8bb41..449f92563a 100644
--- a/internal/cmd/manager/instance/join/cmd.go
+++ b/internal/cmd/manager/instance/join/cmd.go
@@ -53,14 +53,13 @@ func NewCmd() *cobra.Command {
 		},
 		RunE: func(cmd *cobra.Command, _ []string) error {
 			ctx := cmd.Context()
-			instance := postgres.NewInstance()
-
-			// The following are needed to correctly
+			// The fields in the instance are needed to correctly
 			// download the secret containing the TLS
 			// certificates
-			instance.Namespace = namespace
-			instance.PodName = podName
-			instance.ClusterName = clusterName
+			instance := postgres.NewInstance().
+				WithNamespace(namespace).
+				WithPodName(podName).
+				WithClusterName(clusterName)
 
 			info := postgres.InitInfo{
 				PgData: pgData,
@@ -112,7 +111,7 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg
 	// Download the cluster definition from the API server
 	var cluster apiv1.Cluster
 	if err := reconciler.GetClient().Get(ctx,
-		ctrl.ObjectKey{Namespace: instance.Namespace, Name: instance.ClusterName},
+		ctrl.ObjectKey{Namespace: instance.GetNamespaceName(), Name: instance.GetClusterName()},
 		&cluster,
 	); err != nil {
 		log.Error(err, "Error while getting cluster")
diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go
index d4f78f0f21..1d37ac850a 100644
--- a/internal/cmd/manager/instance/run/cmd.go
+++ b/internal/cmd/manager/instance/run/cmd.go
@@ -88,12 +88,12 @@ func NewCmd() *cobra.Command {
 		},
 		RunE: func(cmd *cobra.Command, _ []string) error {
 			ctx := log.IntoContext(cmd.Context(), log.GetLogger())
-			instance := postgres.NewInstance()
+			instance := postgres.NewInstance().
+				WithPodName(podName).
+				WithClusterName(clusterName).
+				WithNamespace(namespace)
 
 			instance.PgData = pgData
-			instance.Namespace = namespace
-			instance.PodName = podName
-			instance.ClusterName = clusterName
 			instance.StatusPortTLS = statusPortTLS
 			instance.MetricsPortTLS = metricsPortTLS
@@ -152,14 +152,14 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
 		Cache: cache.Options{
 			ByObject: map[client.Object]cache.ByObject{
 				&apiv1.Cluster{}: {
-					Field: fields.OneTermEqualSelector("metadata.name", instance.ClusterName),
+					Field: fields.OneTermEqualSelector("metadata.name", instance.GetClusterName()),
 					Namespaces: map[string]cache.Config{
-						instance.Namespace: {},
+						instance.GetNamespaceName(): {},
 					},
 				},
 				&apiv1.Database{}: {
 					Namespaces: map[string]cache.Config{
-						instance.Namespace: {},
+						instance.GetNamespaceName(): {},
 					},
 				},
 			},
diff --git a/internal/management/controller/cache.go b/internal/management/controller/cache.go
index bc031de24d..4d3b06a2b6 100644
--- a/internal/management/controller/cache.go
+++ b/internal/management/controller/cache.go
@@ -47,7 +47,7 @@ func (r *InstanceReconciler) updateCacheFromCluster(ctx context.Context, cluster
 }
 
 func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, cluster *apiv1.Cluster) {
-	_, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.PodName)
+	_, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.GetPodName())
 	if errors.Is(err, walrestore.ErrNoBackupConfigured) {
 		cache.Delete(cache.WALRestoreKey)
 		return
diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go
index 99a1231fe9..81a8373b86 100644
--- a/internal/management/controller/database_controller_test.go
+++ b/internal/management/controller/database_controller_test.go
@@ -86,11 +86,10 @@ var _ = Describe("Managed Database status", func() {
 		db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
 		Expect(err).ToNot(HaveOccurred())
 
-		pgInstance := &postgres.Instance{
-			Namespace:   "default",
-			PodName:     "cluster-example-1",
-			ClusterName: "cluster-example",
-		}
+		pgInstance := postgres.NewInstance().
+			WithNamespace("default").
+			WithPodName("cluster-example-1").
+ WithClusterName("cluster-example") f := fakeInstanceData{ Instance: pgInstance, diff --git a/internal/management/controller/externalservers/manager.go b/internal/management/controller/externalservers/manager.go index 0c6eb00687..db488336aa 100644 --- a/internal/management/controller/externalservers/manager.go +++ b/internal/management/controller/externalservers/manager.go @@ -55,8 +55,8 @@ func (r *Reconciler) getCluster(ctx context.Context) (*apiv1.Cluster, error) { var cluster apiv1.Cluster err := r.client.Get(ctx, types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, + Namespace: r.instance.GetNamespaceName(), + Name: r.instance.GetClusterName(), }, &cluster) if err != nil { diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 518ce96207..358a6e20ad 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -239,14 +239,14 @@ func (r *InstanceReconciler) Reconcile( if result, err := reconciler.ReconcileReplicationSlots( ctx, - r.instance.PodName, + r.instance.GetPodName(), infrastructure.NewPostgresManager(r.instance.ConnectionPool()), cluster, ); err != nil || !result.IsZero() { return result, err } - if r.instance.PodName == cluster.Status.CurrentPrimary { + if r.instance.GetPodName() == cluster.Status.CurrentPrimary { result, err := roles.Reconcile(ctx, r.instance, cluster, r.client) if err != nil || !result.IsZero() { return result, err @@ -296,7 +296,7 @@ func (r *InstanceReconciler) Reconcile( } func (r *InstanceReconciler) configureSlotReplicator(cluster *apiv1.Cluster) { - switch r.instance.PodName { + switch r.instance.GetPodName() { case cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary: r.instance.ConfigureSlotReplicator(nil) default: @@ -308,7 +308,7 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested( ctx context.Context, cluster *apiv1.Cluster, ) (bool, error) { - isPrimary := cluster.Status.CurrentPrimary == r.instance.PodName + isPrimary := cluster.Status.CurrentPrimary == r.instance.GetPodName() restartRequested := isPrimary && cluster.Status.Phase == apiv1.PhaseInplacePrimaryRestart if restartRequested { if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { @@ -366,7 +366,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles( } func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result { - fencingRequired := cluster.IsInstanceFenced(r.instance.PodName) + fencingRequired := cluster.IsInstanceFenced(r.instance.GetPodName()) isFenced := r.instance.IsFenced() switch { case !isFenced && fencingRequired: @@ -411,7 +411,7 @@ func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Clus return err } - r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.PodName)) + r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.GetPodName())) return nil } @@ -428,7 +428,8 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) // we use a file as a flag to ensure the pod has been restarted already. I.e. 
 	// newly created pod we don't need to check the enforced parameters
-	filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s", constants.Startup, r.instance.PodName))
+	filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s",
+		constants.Startup, r.instance.GetPodName()))
 	exists, err := fileutils.FileExists(filename)
 	if err != nil {
 		return err
@@ -482,7 +483,7 @@ func (r *InstanceReconciler) reconcileOldPrimary(
 ) (restarted bool, err error) {
 	contextLogger := log.FromContext(ctx)
 
-	if cluster.Status.TargetPrimary == r.instance.PodName {
+	if cluster.Status.TargetPrimary == r.instance.GetPodName() {
 		return false, nil
 	}
@@ -744,7 +745,7 @@ func (r *InstanceReconciler) reconcileClusterRoleWithoutDB(
 		return false, err
 	}
 	// Reconcile replica role
-	if cluster.Status.TargetPrimary != r.instance.PodName {
+	if cluster.Status.TargetPrimary != r.instance.GetPodName() {
 		if !isPrimary {
 			// We need to ensure that this instance is replicating from the correct server
 			return r.instance.RefreshReplicaConfiguration(ctx, cluster, r.client)
@@ -767,7 +768,7 @@ func (r *InstanceReconciler) reconcileMetrics(
 	exporter := r.metricsServerExporter
 	// We should never reset the SwitchoverRequired metrics as it needs the primary instance restarts,
 	// however, if the cluster is healthy we make sure it is set to 0.
-	if cluster.Status.CurrentPrimary == r.instance.PodName {
+	if cluster.Status.CurrentPrimary == r.instance.GetPodName() {
 		if cluster.Status.Phase == apiv1.PhaseWaitingForUser {
 			exporter.Metrics.SwitchoverRequired.Set(1)
 		} else {
@@ -814,7 +815,7 @@ func (r *InstanceReconciler) reconcileMonitoringQueries(
 		var configMap corev1.ConfigMap
 		err := r.GetClient().Get(
 			ctx,
-			client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name},
+			client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: reference.Name},
 			&configMap)
 		if err != nil {
 			contextLogger.Warning("Unable to get configMap containing custom monitoring queries",
@@ -841,7 +842,12 @@ func (r *InstanceReconciler) reconcileMonitoringQueries(
 	for _, reference := range cluster.Spec.Monitoring.CustomQueriesSecret {
 		var secret corev1.Secret
-		err := r.GetClient().Get(ctx, client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name}, &secret)
+		err := r.GetClient().Get(ctx,
+			client.ObjectKey{
+				Namespace: r.instance.GetNamespaceName(),
+				Name:      reference.Name,
+			},
+			&secret)
 		if err != nil {
 			contextLogger.Warning("Unable to get secret containing custom monitoring queries",
 				"reference", reference,
@@ -1177,7 +1183,7 @@ func (r *InstanceReconciler) refreshFileFromSecret(
 func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv1.Cluster) error {
 	contextLogger := log.FromContext(ctx)
-	if cluster.Status.TargetPrimary != r.instance.PodName || cluster.IsReplica() {
+	if cluster.Status.TargetPrimary != r.instance.GetPodName() || cluster.IsReplica() {
 		return nil
 	}
@@ -1206,8 +1212,8 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv
 	}
 
 	// if the currentPrimary doesn't match the PodName we set the correct value.
-	if cluster.Status.CurrentPrimary != r.instance.PodName {
-		cluster.Status.CurrentPrimary = r.instance.PodName
+	if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
+		cluster.Status.CurrentPrimary = r.instance.GetPodName()
 		cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp()
 
 		if err := r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil {
@@ -1238,7 +1244,7 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv
 func (r *InstanceReconciler) handlePromotion(ctx context.Context, cluster *apiv1.Cluster) error {
 	contextLogger := log.FromContext(ctx)
 	contextLogger.Info("I'm the target primary, wait for the wal_receiver to be terminated")
-	if r.instance.PodName != cluster.Status.CurrentPrimary {
+	if r.instance.GetPodName() != cluster.Status.CurrentPrimary {
 		// if the cluster is not replicating it means it's doing a failover and
 		// we have to wait for wal receivers to be down
 		err := r.waitForWalReceiverDown()
@@ -1262,7 +1268,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary(
 	cluster *apiv1.Cluster,
 ) (changed bool, err error) {
 	// If I'm already the current designated primary everything is ok.
-	if cluster.Status.CurrentPrimary == r.instance.PodName && !r.instance.RequiresDesignatedPrimaryTransition {
+	if cluster.Status.CurrentPrimary == r.instance.GetPodName() && !r.instance.RequiresDesignatedPrimaryTransition {
 		return false, nil
 	}
@@ -1276,7 +1282,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary(
 	log.FromContext(ctx).Info("Setting myself as the current designated primary")
 
 	oldCluster := cluster.DeepCopy()
-	cluster.Status.CurrentPrimary = r.instance.PodName
+	cluster.Status.CurrentPrimary = r.instance.GetPodName()
 	cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp()
 	if r.instance.RequiresDesignatedPrimaryTransition {
 		externalcluster.SetDesignatedPrimaryTransitionCompleted(cluster)
@@ -1350,7 +1356,7 @@ func (r *InstanceReconciler) reconcileUser(ctx context.Context, username string,
 	var secret corev1.Secret
 	err := r.GetClient().Get(
 		ctx,
-		client.ObjectKey{Namespace: r.instance.Namespace, Name: secretName},
+		client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretName},
 		&secret)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
@@ -1393,7 +1399,7 @@ func (r *InstanceReconciler) refreshPGHBA(ctx context.Context, cluster *apiv1.Cl
 		err := r.GetClient().Get(ctx,
 			types.NamespacedName{
 				Name:      ldapSecretName,
-				Namespace: r.instance.Namespace,
+				Namespace: r.instance.GetNamespaceName(),
 			}, &ldapBindPasswordSecret)
 		if err != nil {
 			return false, err
@@ -1454,7 +1460,7 @@ func (r *InstanceReconciler) dropStaleReplicationConnections(
 		return ctrl.Result{}, nil
 	}
 
-	if cluster.Status.CurrentPrimary == r.instance.PodName {
+	if cluster.Status.CurrentPrimary == r.instance.GetPodName() {
 		return ctrl.Result{}, nil
 	}
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go
index 160d8687b6..caeeda9c55 100644
--- a/internal/management/controller/instance_startup.go
+++ b/internal/management/controller/instance_startup.go
@@ -48,7 +48,7 @@ func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context,
 		func() error {
 			err := r.GetClient().Get(
 				ctx,
-				client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerTLSSecret},
+				client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerTLSSecret},
 				&secret)
 			if err != nil {
contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.", @@ -86,7 +86,7 @@ func (r *InstanceReconciler) refreshReplicationUserCertificate( var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ReplicationTLSSecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ReplicationTLSSecret}, &secret) if err != nil { return false, err @@ -105,7 +105,7 @@ func (r *InstanceReconciler) refreshClientCA(ctx context.Context, cluster *apiv1 var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ClientCASecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ClientCASecret}, &secret) if err != nil { return false, err @@ -120,7 +120,7 @@ func (r *InstanceReconciler) refreshServerCA(ctx context.Context, cluster *apiv1 var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerCASecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerCASecret}, &secret) if err != nil { return false, err @@ -148,7 +148,7 @@ func (r *InstanceReconciler) refreshBarmanEndpointCA(ctx context.Context, cluste var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: secretKeySelector.Name}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretKeySelector.Name}, &secret) if err != nil { return false, err @@ -194,7 +194,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context "of the cluster is resumed, demoting immediately") return r.instance.Demote(ctx, cluster) - case targetPrimary == r.instance.PodName: + case targetPrimary == r.instance.GetPodName(): if currentPrimary == "" { // This means that this cluster has been just started up and the // current primary still need to be written @@ -203,7 +203,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context "targetPrimary", targetPrimary) oldCluster := cluster.DeepCopy() - cluster.Status.CurrentPrimary = r.instance.PodName + cluster.Status.CurrentPrimary = r.instance.GetPodName() cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() return r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)) } @@ -349,12 +349,12 @@ func (r *InstanceReconciler) ReconcileTablespaces( mountPoint := specs.MountForTablespace(tbsName) if tbsMount, err := fileutils.FileExists(mountPoint); err != nil { contextLogger.Error(err, "while checking for mountpoint", "instance", - r.instance.PodName, "tablespace", tbsName) + r.instance.GetPodName(), "tablespace", tbsName) return err } else if !tbsMount { contextLogger.Error(fmt.Errorf("mountpoint not found"), "mountpoint for tablespaces is missing", - "instance", r.instance.PodName, "tablespace", tbsName) + "instance", r.instance.GetPodName(), "tablespace", tbsName) continue } @@ -369,7 +369,7 @@ func (r *InstanceReconciler) ReconcileTablespaces( if err != nil { contextLogger.Error(err, "could not create data dir in tablespace mount", - "instance", r.instance.PodName, "tablespace", tbsName) + "instance", r.instance.GetPodName(), "tablespace", tbsName) return fmt.Errorf("while creating data dir in tablespace %s: %w", mountPoint, err) } } diff --git 
diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go
index 9ea68245ae..b1c01130d7 100644
--- a/internal/management/controller/manager.go
+++ b/internal/management/controller/manager.go
@@ -85,8 +85,8 @@ func (r *InstanceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, er
 	var cluster apiv1.Cluster
 	err := r.GetClient().Get(ctx,
 		types.NamespacedName{
-			Namespace: r.instance.Namespace,
-			Name:      r.instance.ClusterName,
+			Namespace: r.instance.GetNamespaceName(),
+			Name:      r.instance.GetClusterName(),
 		},
 		&cluster)
 	if err != nil {
@@ -102,7 +102,7 @@ func (r *InstanceReconciler) GetSecret(ctx context.Context, name string) (*corev
 	err := r.GetClient().Get(ctx,
 		types.NamespacedName{
 			Name:      name,
-			Namespace: r.instance.Namespace,
+			Namespace: r.instance.GetNamespaceName(),
 		}, &secret)
 	if err != nil {
 		return nil, fmt.Errorf("while getting secret: %w", err)
diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go
index dce73d51b1..1eed8f037d 100644
--- a/internal/management/controller/roles/runnable.go
+++ b/internal/management/controller/roles/runnable.go
@@ -138,8 +138,8 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed
 	var remoteCluster apiv1.Cluster
 	if err = sr.client.Get(ctx, types.NamespacedName{
-		Name:      sr.instance.ClusterName,
-		Namespace: sr.instance.Namespace,
+		Name:      sr.instance.GetClusterName(),
+		Namespace: sr.instance.GetNamespaceName(),
 	}, &remoteCluster); err != nil {
 		return err
 	}
@@ -154,8 +154,8 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed
 	}
 
 	if err = sr.client.Get(ctx, types.NamespacedName{
-		Name:      sr.instance.ClusterName,
-		Namespace: sr.instance.Namespace,
+		Name:      sr.instance.GetClusterName(),
+		Namespace: sr.instance.GetNamespaceName(),
 	}, &remoteCluster); err != nil {
 		return err
 	}
@@ -181,7 +181,7 @@ func (sr *RoleSynchronizer) synchronizeRoles(
 	storedPasswordState map[string]apiv1.PasswordState,
 ) (map[string]apiv1.PasswordState, map[string][]string, error) {
 	latestSecretResourceVersion, err := getPasswordSecretResourceVersion(
-		ctx, sr.client, config.Roles, sr.instance.Namespace)
+		ctx, sr.client, config.Roles, sr.instance.GetNamespaceName())
 	if err != nil {
 		return nil, nil, err
 	}
@@ -320,7 +320,7 @@ func (sr *RoleSynchronizer) applyRoleCreateUpdate(
 			fmt.Errorf("cannot reconcile: password both provided and disabled: %s",
 				role.PasswordSecret.Name)
 	case role.PasswordSecret != nil && !role.DisablePassword:
-		passwordSecret, err := getPassword(ctx, sr.client, role, sr.instance.Namespace)
+		passwordSecret, err := getPassword(ctx, sr.client, role, sr.instance.GetNamespaceName())
 		if err != nil {
 			return apiv1.PasswordState{}, err
 		}
diff --git a/internal/management/controller/roles/runnable_test.go b/internal/management/controller/roles/runnable_test.go
index ddd5f2414f..4ba41763c0 100644
--- a/internal/management/controller/roles/runnable_test.go
+++ b/internal/management/controller/roles/runnable_test.go
@@ -229,9 +229,7 @@ func (m *mockRoleManagerWithError) GetParentRoles(_ context.Context, role Databa
 var _ = Describe("Role synchronizer tests", func() {
 	roleSynchronizer := RoleSynchronizer{
-		instance: &postgres.Instance{
-			Namespace: "myPod",
-		},
+		instance: postgres.NewInstance().WithNamespace("myPod"),
 	}
 
 	When("role configurations are realizable", func() {
diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go
index ac49f9b33e..221a5195e0 100644
--- a/internal/management/controller/slots/runner/runner.go
+++ b/internal/management/controller/slots/runner/runner.go
@@ -112,7 +112,7 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl
 		ctx,
 		infrastructure.NewPostgresManager(primaryPool),
 		infrastructure.NewPostgresManager(localPool),
-		sr.instance.PodName,
+		sr.instance.GetPodName(),
 		config,
 	)
 	return err
diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go
index 01cc58234a..4c5bf682ec 100644
--- a/internal/management/controller/tablespaces/controller_test.go
+++ b/internal/management/controller/tablespaces/controller_test.go
@@ -81,9 +81,7 @@ func (mst mockTablespaceStorageManager) getStorageLocation(tablespaceName string
 var _ = Describe("Tablespace synchronizer tests", func() {
 	tablespaceReconciler := TablespaceReconciler{
-		instance: &postgres.Instance{
-			Namespace: "myPod",
-		},
+		instance: postgres.NewInstance().WithNamespace("myPod"),
 	}
 
 	When("tablespace configurations are realizable", func() {
diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go
index a484432d1e..1b793189dc 100644
--- a/internal/management/controller/tablespaces/manager.go
+++ b/internal/management/controller/tablespaces/manager.go
@@ -56,8 +56,8 @@ func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster,
 	var cluster apiv1.Cluster
 	err := r.GetClient().Get(ctx,
 		types.NamespacedName{
-			Namespace: r.instance.Namespace,
-			Name:      r.instance.ClusterName,
+			Namespace: r.instance.GetNamespaceName(),
+			Name:      r.instance.GetClusterName(),
 		},
 		&cluster)
 	if err != nil {
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go
index 0496445681..54d1be07d2 100644
--- a/pkg/management/postgres/instance.go
+++ b/pkg/management/postgres/instance.go
@@ -142,13 +142,13 @@ type Instance struct {
 	primaryPool *pool.ConnectionPool
 
 	// The namespace of the k8s object representing this cluster
-	Namespace string
+	namespace string
 
 	// The name of the Pod where the controller is executing
-	PodName string
+	podName string
 
-	// The name of the cluster of which this Pod is belonging
-	ClusterName string
+	// The name of the cluster this instance belongs to
+	clusterName string
 
 	// The sha256 of the config. It is computed on the config string, before
 	// adding the PostgreSQL CNPGConfigSha256 parameter
@@ -367,6 +367,24 @@ func NewInstance() *Instance {
 	}
 }
 
+// WithNamespace specifies the namespace for this Instance
+func (instance *Instance) WithNamespace(namespace string) *Instance {
+	instance.namespace = namespace
+	return instance
+}
+
+// WithPodName specifies the pod name for this Instance
+func (instance *Instance) WithPodName(podName string) *Instance {
+	instance.podName = podName
+	return instance
+}
+
+// WithClusterName specifies the name of the cluster this Instance belongs to
+func (instance *Instance) WithClusterName(clusterName string) *Instance {
+	instance.clusterName = clusterName
+	return instance
+}
+
 // RetryUntilServerAvailable is the default retry configuration that is used
 // to wait for a successful connection to a certain server
 var RetryUntilServerAvailable = wait.Backoff{
@@ -777,7 +795,7 @@ func (instance *Instance) Demote(ctx context.Context, cluster *apiv1.Cluster) er
 	contextLogger := log.FromContext(ctx)
 	contextLogger.Info("Demoting instance", "pgpdata", instance.PgData)
 
-	slotName := cluster.GetSlotNameFromInstanceName(instance.PodName)
+	slotName := cluster.GetSlotNameFromInstanceName(instance.GetPodName())
 	_, err := UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName)
 	return err
 }
@@ -1113,19 +1131,19 @@ func (instance *Instance) GetInstanceCommandChan() <-chan InstanceCommand {
 	return instance.instanceCommandChan
 }
 
-// GetClusterName returns the name of the cluster where this instance is running
+// GetClusterName returns the name of the cluster where this instance belongs
 func (instance *Instance) GetClusterName() string {
-	return instance.ClusterName
+	return instance.clusterName
 }
 
 // GetPodName returns the name of the pod where this instance is running
 func (instance *Instance) GetPodName() string {
-	return instance.PodName
+	return instance.podName
 }
 
 // GetNamespaceName returns the name of the namespace where this instance is running
 func (instance *Instance) GetNamespaceName() string {
-	return instance.Namespace
+	return instance.namespace
 }
 
 // RequestFastImmediateShutdown request the lifecycle manager to shut down
@@ -1250,7 +1268,7 @@ func (instance *Instance) DropConnections() error {
 
 // GetPrimaryConnInfo returns the DSN to reach the primary
 func (instance *Instance) GetPrimaryConnInfo() string {
-	return buildPrimaryConnInfo(instance.ClusterName+"-rw", instance.PodName)
+	return buildPrimaryConnInfo(instance.GetClusterName()+"-rw", instance.GetPodName())
 }
 
 // HandleInstanceCommandRequests execute a command requested by the reconciliation
diff --git a/pkg/management/postgres/instance_replica.go b/pkg/management/postgres/instance_replica.go
index 5c681b95f8..84dca8e1a0 100644
--- a/pkg/management/postgres/instance_replica.go
+++ b/pkg/management/postgres/instance_replica.go
@@ -51,7 +51,7 @@ func (instance *Instance) RefreshReplicaConfiguration(
 		return changed, nil
 	}
 
-	if cluster.IsReplica() && cluster.Status.TargetPrimary == instance.PodName {
+	if cluster.IsReplica() && cluster.Status.TargetPrimary == instance.GetPodName() {
 		result, err := instance.writeReplicaConfigurationForDesignatedPrimary(ctx, cli, cluster)
 		return changed || result, err
 	}
@@ -60,7 +60,7 @@ func (instance *Instance) RefreshReplicaConfiguration(
 }
 
 func (instance *Instance) writeReplicaConfigurationForReplica(cluster *apiv1.Cluster) (changed bool, err error) {
-	slotName := cluster.GetSlotNameFromInstanceName(instance.PodName)
+	slotName := cluster.GetSlotNameFromInstanceName(instance.GetPodName())
 	return UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName)
 }
@@ -75,7 +75,7 @@ func (instance *Instance) writeReplicaConfigurationForDesignatedPrimary(
 	}
 
 	connectionString, err := external.ConfigureConnectionToServer(
-		ctx, cli, instance.Namespace, &server)
+		ctx, cli, instance.GetNamespaceName(), &server)
 	if err != nil {
 		return false, err
 	}
diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go
index 77a819fcd0..fde50a9f48 100644
--- a/pkg/management/postgres/probes.go
+++ b/pkg/management/postgres/probes.go
@@ -53,7 +53,7 @@ func (instance *Instance) IsServerHealthy() error {
 // GetStatus Extract the status of this PostgreSQL database
 func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err error) {
 	result = &postgres.PostgresqlStatus{
-		Pod:                    &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: instance.PodName}},
+		Pod:                    &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: instance.GetPodName()}},
 		InstanceManagerVersion: versions.Version,
 		MightBeUnavailable:     instance.MightBeUnavailable(),
 	}
@@ -468,7 +468,7 @@ func (instance *Instance) fillWalStatusFromConnection(result *postgres.Postgresq
 			coalesce(sync_priority, 0)
 		FROM pg_catalog.pg_stat_replication
 		WHERE application_name ~ $1 AND usename = $2`,
-		fmt.Sprintf("%s-[0-9]+$", instance.ClusterName),
+		fmt.Sprintf("%s-[0-9]+$", instance.GetClusterName()),
 		v1.StreamingReplicationUser,
 	)
 	if err != nil {
diff --git a/pkg/management/postgres/probes_test.go b/pkg/management/postgres/probes_test.go
index 514949c65a..1973f861bc 100644
--- a/pkg/management/postgres/probes_test.go
+++ b/pkg/management/postgres/probes_test.go
@@ -98,10 +98,9 @@ var _ = Describe("probes", func() {
 	})
 
 	It("set the information", func() {
-		instance := &Instance{
+		instance := (&Instance{
 			pgVersion: &semver.Version{Major: 13},
-			PodName:   "test-1",
-		}
+		}).WithPodName("test-1")
 		status := &postgres.PostgresqlStatus{
 			IsPrimary: false,
 		}
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index a4cf12afd0..6067f0389c 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -790,9 +790,9 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error {
 		return fmt.Errorf("while creating a temporary data directory: %w", err)
 	}
 
-	temporaryInstance := temporaryInitInfo.GetInstance()
-	temporaryInstance.Namespace = info.Namespace
-	temporaryInstance.ClusterName = info.ClusterName
+	temporaryInstance := temporaryInitInfo.GetInstance().
+		WithNamespace(info.Namespace).
+		WithClusterName(info.ClusterName)
 
 	_, err = temporaryInstance.RefreshPGHBA(cluster, "")
 	if err != nil {
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index 61e99860e8..f818623c44 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -81,7 +81,10 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req
 	var cluster apiv1.Cluster
 	err := ws.typedClient.Get(
 		r.Context(),
-		client.ObjectKey{Name: ws.instance.ClusterName, Namespace: ws.instance.Namespace},
+		client.ObjectKey{
+			Name:      ws.instance.GetClusterName(),
+			Namespace: ws.instance.GetNamespaceName(),
+		},
 		&cluster,
 	)
 	if apierrs.IsNotFound(err) {
@@ -140,8 +143,8 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
 	}
 
 	if err := ws.typedClient.Get(ctx, client.ObjectKey{
-		Namespace: ws.instance.Namespace,
-		Name:      ws.instance.ClusterName,
+		Namespace: ws.instance.GetNamespaceName(),
+		Name:      ws.instance.GetClusterName(),
 	}, &cluster); err != nil {
 		http.Error(
 			w,
@@ -151,7 +154,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
 	}
 
 	if err := ws.typedClient.Get(ctx, client.ObjectKey{
-		Namespace: ws.instance.Namespace,
+		Namespace: ws.instance.GetNamespaceName(),
 		Name:      backupName,
 	}, &backup); err != nil {
 		http.Error(
diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go
index 9bfd6ab53f..a7a74373b3 100644
--- a/pkg/management/postgres/webserver/metricserver/pg_collector.go
+++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go
@@ -358,13 +358,13 @@ func (e *Exporter) collectPgMetrics(ch chan<- prometheus.Metric) {
 	// First, let's check the connection. No need to proceed if this fails.
 	if err := db.Ping(); err != nil {
 		log.Warning("Unable to collect metrics", "error", err)
-		e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.ClusterName).Set(0)
+		e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.GetClusterName()).Set(0)
 		e.Metrics.Error.Set(1)
 		e.Metrics.CollectionDuration.WithLabelValues("Collect.up").Set(time.Since(collectionStart).Seconds())
 		return
 	}
 
-	e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.ClusterName).Set(1)
+	e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.GetClusterName()).Set(1)
 	e.Metrics.Error.Set(0)
 	e.Metrics.CollectionDuration.WithLabelValues("Collect.up").Set(time.Since(collectionStart).Seconds())
@@ -541,7 +541,7 @@ func collectPGVersion(e *Exporter) error {
 	if err != nil {
 		return err
 	}
-	e.Metrics.PgVersion.WithLabelValues(majorMinor, e.instance.ClusterName).Set(version)
+	e.Metrics.PgVersion.WithLabelValues(majorMinor, e.instance.GetClusterName()).Set(version)
 	return nil
 }
diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go
index 231f6dccd0..7b9d75becd 100644
--- a/pkg/management/postgres/webserver/remote.go
+++ b/pkg/management/postgres/webserver/remote.go
@@ -357,13 +357,17 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req
 	var cluster apiv1.Cluster
 	if err := ws.typedClient.Get(req.Context(),
-		client.ObjectKey{Namespace: ws.instance.Namespace, Name: ws.instance.ClusterName},
+		client.ObjectKey{
+			Namespace: ws.instance.GetNamespaceName(),
+			Name:      ws.instance.GetClusterName(),
+		},
 		&cluster); err != nil {
 		sendBadRequestJSONResponse(w, "NO_CLUSTER_FOUND", err.Error())
 		return
 	}
 
-	if cluster.Status.TargetPrimary != ws.instance.PodName || cluster.Status.CurrentPrimary != ws.instance.PodName {
+	if cluster.Status.TargetPrimary != ws.instance.GetPodName() ||
+		cluster.Status.CurrentPrimary != ws.instance.GetPodName() {
 		sendBadRequestJSONResponse(w, "NOT_EXPECTED_PRIMARY", "")
 		return
 	}
diff --git a/pkg/management/upgrade/upgrade.go b/pkg/management/upgrade/upgrade.go
index 8c8ce4ff13..7c08c0bb24 100644
--- a/pkg/management/upgrade/upgrade.go
+++ b/pkg/management/upgrade/upgrade.go
@@ -82,7 +82,8 @@ func FromReader(
 	}
 
 	// Validate the hash of this instance manager
-	if err := validateInstanceManagerHash(typedClient, instance.ClusterName, instance.Namespace,
+	if err := validateInstanceManagerHash(typedClient,
+		instance.GetClusterName(), instance.GetNamespaceName(),
 		instanceStatus.InstanceArch, newHash); err != nil {
 		return fmt.Errorf("while validating instance manager binary: %w", err)
 	}
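
End-to-end, the new surface is NewInstance plus three chainable setters and three getters. A self-contained usage sketch, assuming the upstream module path github.com/cloudnative-pg/cloudnative-pg (the API itself is taken from the patch):

	package main

	import (
		"fmt"

		"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
	)

	func main() {
		// Each With* setter returns the same *Instance, so construction
		// stays a single chained expression.
		instance := postgres.NewInstance().
			WithNamespace("default").
			WithPodName("cluster-example-1").
			WithClusterName("cluster-example")

		// With namespace, podName and clusterName unexported, these
		// getters are the only way to read them from other packages.
		fmt.Println(instance.GetNamespaceName()) // default
		fmt.Println(instance.GetPodName())       // cluster-example-1
		fmt.Println(instance.GetClusterName())   // cluster-example
	}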