Skip to content

Commit

Permalink
feat(operator): support Directory Buckets for S3 Express One Zone (#363)
Browse files Browse the repository at this point in the history
* test.sh

* deploy sh

* without tests and readme

* add tests for client

* tests for operation

* tests for operator collection

* README

* change test deploy.sh

* modify deploy sh

* refactor for s3 client

* comments

* refactor

* change return vars

* change IS3

* change returns

* refactor

* refactor returns

* fix

* refactor

* change IS3

* refactor if statement

* refactor

* deploy.sh

* refactor for directoryBucketFlag
  • Loading branch information
go-to-k authored Aug 7, 2024
1 parent 2bdaad3 commit 7bb4fa2
Show file tree
Hide file tree
Showing 17 changed files with 1,199 additions and 249 deletions.
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ All resources that do not fail normal deletion can be deleted as is.

| RESOURCE TYPE | DETAILS |
| ---- | ---- |
| AWS::S3::Bucket | S3 Buckets, including buckets with **Non-empty or Versioning enabled** and DeletionPolicy **not Retain**.(Because "Retain" buckets should not be deleted.) |
| AWS::S3::Bucket | S3 Buckets, including buckets with **Non-empty or Versioning enabled** and DeletionPolicy **not Retain**. (Because "Retain" buckets should not be deleted.) |
| AWS::S3Express::DirectoryBucket | S3 Directory Buckets for S3 Express One Zone, including buckets with Non-empty and DeletionPolicy not Retain. (Because "Retain" buckets should not be deleted.) |
| AWS::IAM::Role | IAM Roles, including roles **with policies from outside the stack**. |
| AWS::ECR::Repository | ECR Repositories, including repositories **containing images**. |
| AWS::Backup::BackupVault | Backup Vaults, including vaults **containing recovery points**. |
Expand Down Expand Up @@ -137,6 +138,7 @@ Select ResourceTypes you wish to delete even if DELETE_FAILED.
However, if a resource can be deleted without becoming DELETE_FAILED by the normal CloudFormation stack deletion feature, the resource will be deleted even if you do not select that resource type.
[Use arrows to move, space to select, <right> to all, <left> to none, type to filter]
[x] AWS::S3::Bucket
[ ] AWS::S3Express::DirectoryBucket
[ ] AWS::IAM::Role
> [x] AWS::ECR::Repository
[ ] AWS::Backup::BackupVault
Expand Down
5 changes: 5 additions & 0 deletions internal/operation/operator_collection.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ func (c *OperatorCollection) SetOperatorCollection(stackName *string, stackResou
c.stackName = aws.ToString(stackName)

s3BucketOperator := c.operatorFactory.CreateS3BucketOperator()
s3DirectoryBucketOperator := c.operatorFactory.CreateS3DirectoryBucketOperator()
iamRoleOperator := c.operatorFactory.CreateIamRoleOperator()
ecrRepositoryOperator := c.operatorFactory.CreateEcrRepositoryOperator()
backupVaultOperator := c.operatorFactory.CreateBackupVaultOperator()
Expand All @@ -57,6 +58,8 @@ func (c *OperatorCollection) SetOperatorCollection(stackName *string, stackResou
switch *stackResource.ResourceType {
case resourcetype.S3Bucket:
s3BucketOperator.AddResource(&stackResource)
case resourcetype.S3DirectoryBucket:
s3DirectoryBucketOperator.AddResource(&stackResource)
case resourcetype.IamRole:
iamRoleOperator.AddResource(&stackResource)
case resourcetype.EcrRepository:
Expand All @@ -75,6 +78,7 @@ func (c *OperatorCollection) SetOperatorCollection(stackName *string, stackResou
}

c.operators = append(c.operators, s3BucketOperator)
c.operators = append(c.operators, s3DirectoryBucketOperator)
c.operators = append(c.operators, iamRoleOperator)
c.operators = append(c.operators, ecrRepositoryOperator)
c.operators = append(c.operators, backupVaultOperator)
Expand Down Expand Up @@ -113,6 +117,7 @@ func (c *OperatorCollection) RaiseUnsupportedResourceError() error {
supportedStackResourcesHeader := []string{"ResourceType", "Description"}
supportedStackResourcesData := [][]string{
{resourcetype.S3Bucket, "S3 Buckets, including buckets with Non-empty or Versioning enabled and DeletionPolicy not Retain."},
{resourcetype.S3DirectoryBucket, "S3 Directory Buckets for S3 Express One Zone, including buckets with Non-empty and DeletionPolicy not Retain."},
{resourcetype.IamRole, "IAM Roles, including roles with policies from outside the stack."},
{resourcetype.EcrRepository, "ECR Repositories, including repositories containing images."},
{resourcetype.BackupVault, "Backup Vaults, including vaults containing recovery points."},
Expand Down
117 changes: 103 additions & 14 deletions internal/operation/operator_collection_test.go

Large diffs are not rendered by default.

20 changes: 20 additions & 0 deletions internal/operation/operator_factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,10 +87,30 @@ func (f *OperatorFactory) CreateS3BucketOperator() *S3BucketOperator {
return NewS3BucketOperator(
client.NewS3(
sdkS3Client,
false,
),
)
}

// CreateS3DirectoryBucketOperator returns an operator that deletes S3
// Directory Buckets (S3 Express One Zone).
func (f *OperatorFactory) CreateS3DirectoryBucketOperator() *S3BucketOperator {
	// Build the underlying SDK client with the shared retry policy.
	s3Client := s3.NewFromConfig(f.config, func(o *s3.Options) {
		o.RetryMaxAttempts = SDKRetryMaxAttempts
		o.RetryMode = aws.RetryModeStandard
	})

	// Basically, a separate operator should be defined for each resource type,
	// but the S3DirectoryBucket uses the same operator as the S3BucketOperator
	// since the process is almost the same. The client is constructed with the
	// directory-bucket flag set to true to select the directory-bucket code path.
	return NewS3BucketOperator(
		client.NewS3(
			s3Client,
			true,
		),
	)
}

// CreateCustomOperator returns an implicit operator for resource types that
// are not actually deleted by this tool.
func (f *OperatorFactory) CreateCustomOperator() *CustomOperator {
	return NewCustomOperator()
}
19 changes: 14 additions & 5 deletions internal/operation/s3_bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,11 +70,11 @@ func (o *S3BucketOperator) DeleteS3Bucket(ctx context.Context, bucketName *strin
var keyMarker *string
var versionIdMarker *string
for {
var versions []s3types.ObjectIdentifier
var objects []s3types.ObjectIdentifier

// ListObjectVersions API can only retrieve up to 1000 items, so it is good to pass it
// ListObjectVersions/ListObjectsV2 API can only retrieve up to 1000 items, so it is good to pass it
// directly to DeleteObjects, which can only delete up to 1000 items.
versions, keyMarker, versionIdMarker, err = o.client.ListObjectVersionsByPage(
output, err := o.client.ListObjectsOrVersionsByPage(
ctx,
bucketName,
keyMarker,
Expand All @@ -83,7 +83,12 @@ func (o *S3BucketOperator) DeleteS3Bucket(ctx context.Context, bucketName *strin
if err != nil {
return err
}
if len(versions) == 0 {

objects = output.ObjectIdentifiers
keyMarker = output.NextKeyMarker
versionIdMarker = output.NextVersionIdMarker

if len(objects) == 0 {
break
}

Expand All @@ -92,7 +97,7 @@ func (o *S3BucketOperator) DeleteS3Bucket(ctx context.Context, bucketName *strin
// the next loop. Therefore, there seems to be no throttling concern, so the number of
// parallels is not limited by semaphore. (Throttling occurs at about 3500 deletions
// per second.)
gotErrors, err := o.client.DeleteObjects(ctx, bucketName, versions)
gotErrors, err := o.client.DeleteObjects(ctx, bucketName, objects)
if err != nil {
return err
}
Expand Down Expand Up @@ -137,3 +142,7 @@ func (o *S3BucketOperator) DeleteS3Bucket(ctx context.Context, bucketName *strin

return nil
}

// GetDirectoryBucketsFlag reports whether the operator's underlying S3 client
// was created for S3 Directory Buckets (S3 Express One Zone) rather than
// general purpose buckets; it simply delegates to the client.
func (o *S3BucketOperator) GetDirectoryBucketsFlag() bool {
	return o.client.GetDirectoryBucketsFlag()
}
Loading

0 comments on commit 7bb4fa2

Please sign in to comment.