diff --git a/pkg/controller/cache/replicationgroup/managed.go b/pkg/controller/cache/replicationgroup/managed.go
index 2df8d5be9f..c01282d314 100644
--- a/pkg/controller/cache/replicationgroup/managed.go
+++ b/pkg/controller/cache/replicationgroup/managed.go
@@ -364,11 +364,15 @@ func getVersion(version *string) (*string, error) {
 	}
 	versionOut := strconv.Itoa(version1)
 	if len(versionSplit) > 1 {
-		version2, err := strconv.Atoi(versionSplit[1])
-		if err != nil {
-			return nil, errors.Wrap(err, errVersionInput)
+		if versionSplit[1] != "x" {
+			version2, err := strconv.Atoi(versionSplit[1])
+			if err != nil {
+				return nil, errors.Wrap(err, errVersionInput)
+			}
+			versionOut += "." + strconv.Itoa(version2)
+		} else {
+			versionOut += ".x"
 		}
-		versionOut += "." + strconv.Itoa(version2)
 	}
 	return &versionOut, nil
 }
diff --git a/pkg/controller/cache/replicationgroup/managed_test.go b/pkg/controller/cache/replicationgroup/managed_test.go
index 1137d4f569..8661ca65a6 100644
--- a/pkg/controller/cache/replicationgroup/managed_test.go
+++ b/pkg/controller/cache/replicationgroup/managed_test.go
@@ -37,8 +37,9 @@ import (
 )
 
 const (
-	name                = "coolGroup"
+	name                         = "coolGroup"
 	engineVersionToTest = "5.0.2"
+	alternateEngineVersionToTest = "6.x"
 )
 
 var (
@@ -46,6 +47,7 @@ var (
 	autoFailoverEnabled     = true
 	cacheParameterGroupName = "coolParamGroup"
 	engineVersion           = "5.0"
+	alternateEngineVersion  = "6.x"
 	port                    = 6379
 	host                    = "172.16.0.1"
 	maintenanceWindow       = "tomorrow"
@@ -789,6 +791,54 @@ func TestUpdate(t *testing.T) {
 			),
 			returnsErr: false,
 		},
+		{
+			name: "IncreaseReplicationsAndCheckBehaviourVersionx",
+			e: &external{client: &fake.MockClient{
+				MockDescribeReplicationGroups: func(ctx context.Context, _ *elasticache.DescribeReplicationGroupsInput, opts []func(*elasticache.Options)) (*elasticache.DescribeReplicationGroupsOutput, error) {
+					return &elasticache.DescribeReplicationGroupsOutput{
+						ReplicationGroups: []types.ReplicationGroup{{
+							Status:                 aws.String(v1beta1.StatusAvailable),
+							MemberClusters:         cacheClusters,
+							AutomaticFailover:      types.AutomaticFailoverStatusEnabled,
+							CacheNodeType:          aws.String(cacheNodeType),
+							SnapshotRetentionLimit: aws.Int32(int32(snapshotRetentionLimit)),
+							SnapshotWindow:         aws.String(snapshotWindow),
+							ClusterEnabled:         aws.Bool(true),
+							ConfigurationEndpoint:  &types.Endpoint{Address: aws.String(host), Port: int32(port)},
+						}},
+					}, nil
+				},
+				MockDescribeCacheClusters: func(ctx context.Context, _ *elasticache.DescribeCacheClustersInput, opts []func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {
+					return &elasticache.DescribeCacheClustersOutput{
+						CacheClusters: []types.CacheCluster{
+							{EngineVersion: aws.String(engineVersion)},
+							{EngineVersion: aws.String(engineVersion)},
+							{EngineVersion: aws.String(engineVersion)},
+						},
+					}, nil
+				},
+				MockIncreaseReplicaCount: func(ctx context.Context, _ *elasticache.IncreaseReplicaCountInput, opts []func(*elasticache.Options)) (*elasticache.IncreaseReplicaCountOutput, error) {
+					return &elasticache.IncreaseReplicaCountOutput{}, nil
+				},
+			}},
+			r: replicationGroup(
+				withEngineVersion(alternateEngineVersionToTest),
+				withReplicationGroupID(name),
+				withProviderStatus(v1beta1.StatusAvailable),
+				withConditions(xpv1.Available()),
+				withMemberClusters(cacheClusters),
+				withNumCacheClusters(4),
+			),
+			want: replicationGroup(
+				withEngineVersion(alternateEngineVersion),
+				withReplicationGroupID(name),
+				withProviderStatus(v1beta1.StatusAvailable),
+				withConditions(xpv1.Available()),
+				withMemberClusters(cacheClusters),
+				withNumCacheClusters(4),
+			),
+			returnsErr: false,
+		},
 	}
 
 	for _, tc := range cases {