chore: attempt to fix the version problems of replicationgroups (#1)
Signed-off-by: Daniel.Humphries <[email protected]>
dhumphries-sainsburys committed Aug 26, 2024
1 parent 34115e2 commit 2b4ad73
Showing 2 changed files with 59 additions and 5 deletions.
12 changes: 8 additions & 4 deletions pkg/controller/cache/replicationgroup/managed.go
@@ -364,11 +364,15 @@ func getVersion(version *string) (*string, error) {
 	}
 	versionOut := strconv.Itoa(version1)
 	if len(versionSplit) > 1 {
-		version2, err := strconv.Atoi(versionSplit[1])
-		if err != nil {
-			return nil, errors.Wrap(err, errVersionInput)
+		if versionSplit[1] != "x" {
+			version2, err := strconv.Atoi(versionSplit[1])
+			if err != nil {
+				return nil, errors.Wrap(err, errVersionInput)
+			}
+			versionOut += "." + strconv.Itoa(version2)
+		} else {
+			versionOut += ".x"
 		}
-		versionOut += "." + strconv.Itoa(version2)
 	}
 	return &versionOut, nil
 }
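In effect, getVersion now treats a literal "x" minor component (as in the "6.x" engine version exercised by the new test below) as a pass-through instead of failing strconv.Atoi on it, while numeric minors are still validated. Below is a minimal, self-contained sketch of the same rule; normalizeVersion is a hypothetical stand-in for the real getVersion, which also validates the major component and wraps errors with errVersionInput.

// Illustrative sketch only, not the production function. It mirrors the
// branch added above: numeric minors are validated, a literal "x" is kept.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func normalizeVersion(version string) (string, error) {
	parts := strings.Split(version, ".")
	major, err := strconv.Atoi(parts[0])
	if err != nil {
		return "", fmt.Errorf("invalid major version %q: %w", parts[0], err)
	}
	out := strconv.Itoa(major)
	if len(parts) > 1 {
		if parts[1] != "x" {
			minor, err := strconv.Atoi(parts[1])
			if err != nil {
				return "", fmt.Errorf("invalid minor version %q: %w", parts[1], err)
			}
			out += "." + strconv.Itoa(minor)
		} else {
			// Keep the wildcard minor untouched, e.g. "6.x" stays "6.x".
			out += ".x"
		}
	}
	// Any patch component (e.g. the ".2" in "5.0.2") is dropped, as in getVersion.
	return out, nil
}

func main() {
	for _, v := range []string{"5.0.2", "6.x"} {
		out, err := normalizeVersion(v)
		fmt.Printf("%s -> %s (err: %v)\n", v, out, err)
	}
}

Under this rule "5.0.2" normalizes to "5.0" and "6.x" is returned unchanged, which lines up with the engineVersionToTest and alternateEngineVersionToTest constants added in the test file below.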
52 changes: 51 additions & 1 deletion pkg/controller/cache/replicationgroup/managed_test.go
@@ -37,15 +37,17 @@ import (
 )
 
 const (
-	name = "coolGroup"
+	name                         = "coolGroup"
+	engineVersionToTest          = "5.0.2"
+	alternateEngineVersionToTest = "6.x"
 )
 
 var (
 	cacheNodeType           = "n1.super.cool"
 	autoFailoverEnabled     = true
 	cacheParameterGroupName = "coolParamGroup"
 	engineVersion           = "5.0"
+	alternateEngineVersion  = "6.x"
 	port                    = 6379
 	host                    = "172.16.0.1"
 	maintenanceWindow       = "tomorrow"
@@ -789,6 +791,54 @@ func TestUpdate(t *testing.T) {
 			),
 			returnsErr: false,
 		},
+		{
+			name: "IncreaseReplicationsAndCheckBehaviourVersionx",
+			e: &external{client: &fake.MockClient{
+				MockDescribeReplicationGroups: func(ctx context.Context, _ *elasticache.DescribeReplicationGroupsInput, opts []func(*elasticache.Options)) (*elasticache.DescribeReplicationGroupsOutput, error) {
+					return &elasticache.DescribeReplicationGroupsOutput{
+						ReplicationGroups: []types.ReplicationGroup{{
+							Status:                 aws.String(v1beta1.StatusAvailable),
+							MemberClusters:         cacheClusters,
+							AutomaticFailover:      types.AutomaticFailoverStatusEnabled,
+							CacheNodeType:          aws.String(cacheNodeType),
+							SnapshotRetentionLimit: aws.Int32(int32(snapshotRetentionLimit)),
+							SnapshotWindow:         aws.String(snapshotWindow),
+							ClusterEnabled:         aws.Bool(true),
+							ConfigurationEndpoint:  &types.Endpoint{Address: aws.String(host), Port: int32(port)},
+						}},
+					}, nil
+				},
+				MockDescribeCacheClusters: func(ctx context.Context, _ *elasticache.DescribeCacheClustersInput, opts []func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {
+					return &elasticache.DescribeCacheClustersOutput{
+						CacheClusters: []types.CacheCluster{
+							{EngineVersion: aws.String(engineVersion)},
+							{EngineVersion: aws.String(engineVersion)},
+							{EngineVersion: aws.String(engineVersion)},
+						},
+					}, nil
+				},
+				MockIncreaseReplicaCount: func(ctx context.Context, _ *elasticache.IncreaseReplicaCountInput, opts []func(*elasticache.Options)) (*elasticache.IncreaseReplicaCountOutput, error) {
+					return &elasticache.IncreaseReplicaCountOutput{}, nil
+				},
+			}},
+			r: replicationGroup(
+				withEngineVersion(alternateEngineVersionToTest),
+				withReplicationGroupID(name),
+				withProviderStatus(v1beta1.StatusAvailable),
+				withConditions(xpv1.Available()),
+				withMemberClusters(cacheClusters),
+				withNumCacheClusters(4),
+			),
+			want: replicationGroup(
+				withEngineVersion(alternateEngineVersion),
+				withReplicationGroupID(name),
+				withProviderStatus(v1beta1.StatusAvailable),
+				withConditions(xpv1.Available()),
+				withMemberClusters(cacheClusters),
+				withNumCacheClusters(4),
+			),
+			returnsErr: false,
+		},
 	}
 
 	for _, tc := range cases {
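The new table entry drives the Update path with a "6.x" engine version: the spec asks for four cache clusters while the mocked DescribeCacheClusters returns three, so IncreaseReplicaCount is exercised, and the expected resource keeps the "6.x" version rather than erroring on it. Assuming the runner at the bottom of TestUpdate registers each entry with t.Run(tc.name, ...) as is conventional for table tests (the runner body is collapsed above), the case can be run on its own from the repository root with:

go test ./pkg/controller/cache/replicationgroup -run 'TestUpdate/IncreaseReplicationsAndCheckBehaviourVersionx'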
