79 changes: 75 additions & 4 deletions tests/e2e/kubetest2-kops/aws/s3.go
@@ -37,12 +37,21 @@ import (
// running on AWS.
const defaultRegion = "us-east-2"

var bucketNameRegex = regexp.MustCompile("[^a-z0-9-]")

// Client contains S3 and STS clients that are used to perform bucket and object actions.
type Client struct {
s3Client *s3.Client
stsClient *sts.Client
}

type BucketType string

const (
BucketTypeStateStore BucketType = "state"
BucketTypeDiscoveryStore BucketType = "discovery"
)

// NewClient returns a new instance of Client configured to work in the default region (us-east-2).
func NewClient(ctx context.Context) (*Client, error) {
cfg, err := awsconfig.LoadDefaultConfig(ctx,
@@ -58,7 +67,7 @@ func NewClient(ctx context.Context) (*Client, error) {
}

// BucketName constructs a unique bucket name using the AWS account ID in the default region (us-east-2).
func (c Client) BucketName(ctx context.Context) (string, error) {
func (c Client) BucketName(ctx context.Context, bucketType BucketType) (string, error) {
// Construct the bucket name based on the ProwJob ID (if running in Prow) or AWS account ID (if running outside
// Prow) and the current timestamp
var identifier string
@@ -72,11 +81,11 @@ func (c Client) BucketName(ctx context.Context) (string, error) {
identifier = *callerIdentity.Account
}
timestamp := time.Now().Format("20060102150405")
bucket := fmt.Sprintf("k8s-infra-kops-%s-%s", identifier, timestamp)
bucket := fmt.Sprintf("k8s-infra-kops-%s-%s-%s", bucketType, identifier, timestamp)

bucket = strings.ToLower(bucket)
// Only allow lowercase letters, numbers, and hyphens
bucket = regexp.MustCompile("[^a-z0-9-]").ReplaceAllString(bucket, "")
bucket = bucketNameRegex.ReplaceAllString(bucket, "")

if len(bucket) > 63 {
bucket = bucket[:63] // Max length is 63
@@ -120,6 +129,15 @@ func (c Client) EnsureS3Bucket(ctx context.Context, bucketName string, publicRea
klog.Infof("Bucket %s created successfully", bucketName)

if publicRead {
// We assume the bucket can take 5-10 seconds to be created, so wait before configuring it.
time.Sleep(10 * time.Second)
err = c.setPublicAccessBlock(ctx, bucketName)
if err != nil {
klog.Errorf("Failed to disable public access block policies on bucket %s, err: %v", bucketName, err)

return fmt.Errorf("disabling public access block policies for bucket %s: %w", bucketName, err)
}

err = c.setPublicReadPolicy(ctx, bucketName)
if err != nil {
klog.Errorf("Failed to set public read policy on bucket %s, err: %v", bucketName, err)
@@ -136,13 +154,53 @@ func (c Client) EnsureS3Bucket(ctx context.Context, bucketName string, publicRea
// DeleteS3Bucket deletes a S3 bucket with the given name.
func (c Client) DeleteS3Bucket(ctx context.Context, bucketName string) error {
bucketName = strings.TrimPrefix(bucketName, "s3://")

// Empty the bucket first
paginator := s3.NewListObjectsV2Paginator(c.s3Client, &s3.ListObjectsV2Input{
Bucket: aws.String(bucketName),
})
Comment on lines +158 to +161
Member:


It is a good idea, but I would expect that kOps removes all the files from the bucket, otherwise it's a bug.
This approach may hide such issues.

Member (Author):


kOps force deletes the S3 buckets at shutdown whether the bucket is empty or not. This is just an extra step to guarantee the bucket is properly deleted.

Potential issues will probably not be about the emptiness of the bucket.

Happy to drop this if you think it's unnecessary
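
For reference, a minimal sketch of how the cleanup could surface leftover objects instead of only removing them silently, addressing the concern above. countObjects is a hypothetical helper, not part of this PR, and assumes the same Client type, s3Client field, and SDK imports already used in s3.go:

// countObjects is a hypothetical helper: it reports how many objects remain,
// so DeleteS3Bucket could log a warning when kOps left the bucket non-empty.
func (c Client) countObjects(ctx context.Context, bucketName string) (int, error) {
	total := 0
	paginator := s3.NewListObjectsV2Paginator(c.s3Client, &s3.ListObjectsV2Input{
		Bucket: aws.String(bucketName),
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return 0, fmt.Errorf("listing objects in bucket %s: %w", bucketName, err)
		}
		total += len(page.Contents)
	}
	return total, nil
}

DeleteS3Bucket could call this first and emit a klog.Warningf when the count is non-zero, keeping the extra cleanup while still making leftover files visible as a potential kOps bug.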


for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
var noBucket *types.NoSuchBucket
if errors.As(err, &noBucket) {
klog.Infof("Bucket %s does not exist.", bucketName)
return nil
}
return fmt.Errorf("listing objects in bucket %s: %w", bucketName, err)
}

if len(page.Contents) == 0 {
continue
}

var objects []types.ObjectIdentifier
for _, obj := range page.Contents {
objects = append(objects, types.ObjectIdentifier{
Key: obj.Key,
})
}

_, err = c.s3Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(bucketName),
Delete: &types.Delete{
Objects: objects,
Quiet: aws.Bool(true),
},
})
if err != nil {
return fmt.Errorf("deleting objects in bucket %s: %w", bucketName, err)
}
}

_, err := c.s3Client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
var noBucket *types.NoSuchBucket
if errors.As(err, &noBucket) {
klog.Infof("Bucket %s does not exits.", bucketName)
klog.Infof("Bucket %s does not exist.", bucketName)

return nil
} else {
@@ -192,3 +250,16 @@ func (c Client) setPublicReadPolicy(ctx context.Context, bucketName string) erro

return nil
}

func (c Client) setPublicAccessBlock(ctx context.Context, bucketName string) error {
_, err := c.s3Client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
Bucket: aws.String(bucketName),
PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
BlockPublicAcls: aws.Bool(true),
IgnorePublicAcls: aws.Bool(true),
BlockPublicPolicy: aws.Bool(false),
RestrictPublicBuckets: aws.Bool(false),
},
})
return err
}
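
As a side note, EnsureS3Bucket above waits a fixed 10 seconds before disabling the public access block. A minimal alternative sketch, not part of this PR, would use the SDK's bucket-exists waiter instead of a fixed sleep (assuming the same Client type and imports as s3.go):

// waitForBucket polls HeadBucket until the bucket is visible, for up to
// 30 seconds, instead of sleeping for a fixed duration.
func (c Client) waitForBucket(ctx context.Context, bucketName string) error {
	waiter := s3.NewBucketExistsWaiter(c.s3Client)
	return waiter.Wait(ctx, &s3.HeadBucketInput{
		Bucket: aws.String(bucketName),
	}, 30*time.Second)
}

This trades the guess at propagation time for an extra HeadBucket call per polling attempt.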
15 changes: 11 additions & 4 deletions tests/e2e/kubetest2-kops/deployer/common.go
@@ -364,7 +364,7 @@ func (d *deployer) stateStore() string {
switch d.CloudProvider {
case "aws":
ctx := context.Background()
bucketName, err := d.aws.BucketName(ctx)
bucketName, err := d.aws.BucketName(ctx, aws.BucketTypeStateStore)
if err != nil {
klog.Fatalf("Failed to generate bucket name: %v", err)
return ""
@@ -391,11 +391,18 @@ func (d *deployer) discoveryStore() string {
if d.discoveryStoreName != "" {
return d.discoveryStoreName
}
discovery := os.Getenv("KOPS_DISCOVERY_STORE")
if discovery == "" {
discovery, found := os.LookupEnv("KOPS_DISCOVERY_STORE")
if !found {
switch d.CloudProvider {
case "aws":
discovery = "s3://k8s-kops-ci-prow"
ctx := context.Background()
bucketName, err := d.aws.BucketName(ctx, aws.BucketTypeDiscoveryStore)
if err != nil {
klog.Fatalf("Failed to generate bucket name: %v", err)
return ""
}
d.createBucket = true
discovery = "s3://" + bucketName
}
}
d.discoveryStoreName = discovery
3 changes: 3 additions & 0 deletions tests/e2e/kubetest2-kops/deployer/down.go
@@ -80,6 +80,9 @@ func (d *deployer) Down() error {
if err := d.aws.DeleteS3Bucket(ctx, d.stateStore()); err != nil {
return err
}
if err := d.aws.DeleteS3Bucket(ctx, d.discoveryStore()); err != nil {
return err
}
case "gce":
gce.DeleteGCSBucket(d.stateStore(), d.GCPProject)
gce.DeleteGCSBucket(d.stagingStore(), d.GCPProject)
4 changes: 4 additions & 0 deletions tests/e2e/kubetest2-kops/deployer/up.go
@@ -73,6 +73,9 @@ func (d *deployer) Up() error {
if err := d.aws.EnsureS3Bucket(ctx, d.stateStore(), false); err != nil {
return err
}
if err := d.aws.EnsureS3Bucket(ctx, d.discoveryStore(), true); err != nil {
return err
}
case "gce":
if err := gce.EnsureGCSBucket(d.stateStore(), d.GCPProject, false); err != nil {
return err
@@ -165,6 +168,7 @@ func (d *deployer) createCluster(zones []string, adminAccess string, yes bool) e
d.KopsBinaryPath, "create", "cluster",
"--name", d.ClusterName,
"--cloud", d.CloudProvider,
"--discovery-store", d.discoveryStore(),
"--kubernetes-version", d.KubernetesVersion,
"--ssh-public-key", d.SSHPublicKeyPath,
"--set", "cluster.spec.nodePortAccess=0.0.0.0/0",