diff --git a/tests/e2e/kubetest2-kops/aws/s3.go b/tests/e2e/kubetest2-kops/aws/s3.go
index 0eeb216a6e87a..4d7165e0a2995 100644
--- a/tests/e2e/kubetest2-kops/aws/s3.go
+++ b/tests/e2e/kubetest2-kops/aws/s3.go
@@ -37,12 +37,21 @@ import (
 // running on AWS.
 const defaultRegion = "us-east-2"
 
+var bucketNameRegex = regexp.MustCompile("[^a-z0-9-]")
+
 // Client contains S3 and STS clients that are used to perform bucket and object actions.
 type Client struct {
 	s3Client  *s3.Client
 	stsClient *sts.Client
 }
 
+type BucketType string
+
+const (
+	BucketTypeStateStore     BucketType = "state"
+	BucketTypeDiscoveryStore BucketType = "discovery"
+)
+
 // NewAWSClient returns a new instance of awsClient configured to work in the default region (us-east-2).
 func NewClient(ctx context.Context) (*Client, error) {
 	cfg, err := awsconfig.LoadDefaultConfig(ctx,
@@ -58,7 +67,7 @@ func NewClient(ctx context.Context) (*Client, error) {
 }
 
 // BucketName constructs an unique bucket name using the AWS account ID in the default region (us-east-2).
-func (c Client) BucketName(ctx context.Context) (string, error) {
+func (c Client) BucketName(ctx context.Context, bucketType BucketType) (string, error) {
 	// Construct the bucket name based on the ProwJob ID (if running in Prow) or AWS account ID (if running outside
 	// Prow) and the current timestamp
 	var identifier string
@@ -72,11 +81,11 @@ func (c Client) BucketName(ctx context.Context) (string, error) {
 		identifier = *callerIdentity.Account
 	}
 	timestamp := time.Now().Format("20060102150405")
-	bucket := fmt.Sprintf("k8s-infra-kops-%s-%s", identifier, timestamp)
+	bucket := fmt.Sprintf("k8s-infra-kops-%s-%s-%s", bucketType, identifier, timestamp)
 	bucket = strings.ToLower(bucket)
 
 	// Only allow lowercase letters, numbers, and hyphens
-	bucket = regexp.MustCompile("[^a-z0-9-]").ReplaceAllString(bucket, "")
+	bucket = bucketNameRegex.ReplaceAllString(bucket, "")
 
 	if len(bucket) > 63 {
 		bucket = bucket[:63] // Max length is 63
@@ -120,6 +129,15 @@ func (c Client) EnsureS3Bucket(ctx context.Context, bucketName string, publicRea
 	klog.Infof("Bucket %s created successfully", bucketName)
 
 	if publicRead {
+		// Bucket creation can take 5-10 seconds to propagate, so wait before updating its access configuration.
+		time.Sleep(10 * time.Second)
+		err = c.setPublicAccessBlock(ctx, bucketName)
+		if err != nil {
+			klog.Errorf("Failed to disable public access block policies on bucket %s, err: %v", bucketName, err)
+
+			return fmt.Errorf("disabling public access block policies for bucket %s: %w", bucketName, err)
+		}
+
 		err = c.setPublicReadPolicy(ctx, bucketName)
 		if err != nil {
 			klog.Errorf("Failed to set public read policy on bucket %s, err: %v", bucketName, err)
@@ -136,13 +154,53 @@ func (c Client) EnsureS3Bucket(ctx context.Context, bucketName string, publicRea
 // DeleteS3Bucket deletes a S3 bucket with the given name.
 func (c Client) DeleteS3Bucket(ctx context.Context, bucketName string) error {
 	bucketName = strings.TrimPrefix(bucketName, "s3://")
+
+	// Empty the bucket first
+	paginator := s3.NewListObjectsV2Paginator(c.s3Client, &s3.ListObjectsV2Input{
+		Bucket: aws.String(bucketName),
+	})
+
+	for paginator.HasMorePages() {
+		page, err := paginator.NextPage(ctx)
+		if err != nil {
+			var noBucket *types.NoSuchBucket
+			if errors.As(err, &noBucket) {
+				klog.Infof("Bucket %s does not exist.", bucketName)
+				return nil
+			}
+			return fmt.Errorf("listing objects in bucket %s: %w", bucketName, err)
+		}
+
+		if len(page.Contents) == 0 {
+			continue
+		}
+
+		var objects []types.ObjectIdentifier
+		for _, obj := range page.Contents {
+			objects = append(objects, types.ObjectIdentifier{
+				Key: obj.Key,
+			})
+		}
+
+		_, err = c.s3Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
+			Bucket: aws.String(bucketName),
+			Delete: &types.Delete{
+				Objects: objects,
+				Quiet:   aws.Bool(true),
+			},
+		})
+		if err != nil {
+			return fmt.Errorf("deleting objects in bucket %s: %w", bucketName, err)
+		}
+	}
+
 	_, err := c.s3Client.DeleteBucket(ctx, &s3.DeleteBucketInput{
 		Bucket: aws.String(bucketName),
 	})
 	if err != nil {
 		var noBucket *types.NoSuchBucket
 		if errors.As(err, &noBucket) {
-			klog.Infof("Bucket %s does not exits.", bucketName)
+			klog.Infof("Bucket %s does not exist.", bucketName)
 
 			return nil
 		} else {
@@ -192,3 +250,16 @@ func (c Client) setPublicReadPolicy(ctx context.Context, bucketName string) erro
 
 	return nil
 }
+
+func (c Client) setPublicAccessBlock(ctx context.Context, bucketName string) error {
+	_, err := c.s3Client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
+		Bucket: aws.String(bucketName),
+		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
+			BlockPublicAcls:       aws.Bool(true),
+			IgnorePublicAcls:      aws.Bool(true),
+			BlockPublicPolicy:     aws.Bool(false),
+			RestrictPublicBuckets: aws.Bool(false),
+		},
+	})
+	return err
+}
diff --git a/tests/e2e/kubetest2-kops/deployer/common.go b/tests/e2e/kubetest2-kops/deployer/common.go
index 915a89770d6b2..f0ca007e594ee 100644
--- a/tests/e2e/kubetest2-kops/deployer/common.go
+++ b/tests/e2e/kubetest2-kops/deployer/common.go
@@ -364,7 +364,7 @@ func (d *deployer) stateStore() string {
 		switch d.CloudProvider {
 		case "aws":
 			ctx := context.Background()
-			bucketName, err := d.aws.BucketName(ctx)
+			bucketName, err := d.aws.BucketName(ctx, aws.BucketTypeStateStore)
 			if err != nil {
 				klog.Fatalf("Failed to generate bucket name: %v", err)
 				return ""
@@ -391,11 +391,18 @@ func (d *deployer) discoveryStore() string {
 	if d.discoveryStoreName != "" {
 		return d.discoveryStoreName
 	}
-	discovery := os.Getenv("KOPS_DISCOVERY_STORE")
-	if discovery == "" {
+	discovery, found := os.LookupEnv("KOPS_DISCOVERY_STORE")
+	if !found {
 		switch d.CloudProvider {
 		case "aws":
-			discovery = "s3://k8s-kops-ci-prow"
+			ctx := context.Background()
+			bucketName, err := d.aws.BucketName(ctx, aws.BucketTypeDiscoveryStore)
+			if err != nil {
+				klog.Fatalf("Failed to generate bucket name: %v", err)
+				return ""
+			}
+			d.createBucket = true
+			discovery = "s3://" + bucketName
 		}
 	}
 	d.discoveryStoreName = discovery
diff --git a/tests/e2e/kubetest2-kops/deployer/down.go b/tests/e2e/kubetest2-kops/deployer/down.go
index 2904a1db9e5e2..bf50a276e0473 100644
--- a/tests/e2e/kubetest2-kops/deployer/down.go
+++ b/tests/e2e/kubetest2-kops/deployer/down.go
@@ -80,6 +80,9 @@ func (d *deployer) Down() error {
 		if err := d.aws.DeleteS3Bucket(ctx, d.stateStore()); err != nil {
 			return err
 		}
+		if err := d.aws.DeleteS3Bucket(ctx, d.discoveryStore()); err != nil {
+			return err
+		}
 	case "gce":
 		gce.DeleteGCSBucket(d.stateStore(), d.GCPProject)
 		gce.DeleteGCSBucket(d.stagingStore(), d.GCPProject)
diff --git a/tests/e2e/kubetest2-kops/deployer/up.go b/tests/e2e/kubetest2-kops/deployer/up.go
index 81bda29db6101..8c3d0dd85f3a1 100644
--- a/tests/e2e/kubetest2-kops/deployer/up.go
+++ b/tests/e2e/kubetest2-kops/deployer/up.go
@@ -73,6 +73,9 @@ func (d *deployer) Up() error {
 		if err := d.aws.EnsureS3Bucket(ctx, d.stateStore(), false); err != nil {
 			return err
 		}
+		if err := d.aws.EnsureS3Bucket(ctx, d.discoveryStore(), true); err != nil {
+			return err
+		}
 	case "gce":
 		if err := gce.EnsureGCSBucket(d.stateStore(), d.GCPProject, false); err != nil {
 			return err
@@ -165,6 +168,7 @@ func (d *deployer) createCluster(zones []string, adminAccess string, yes bool) e
 		d.KopsBinaryPath, "create", "cluster",
 		"--name", d.ClusterName,
 		"--cloud", d.CloudProvider,
+		"--discovery-store", d.discoveryStore(),
 		"--kubernetes-version", d.KubernetesVersion,
 		"--ssh-public-key", d.SSHPublicKeyPath,
 		"--set", "cluster.spec.nodePortAccess=0.0.0.0/0",
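
For reference, a minimal sketch of how the changed helpers fit together, assuming the e2e helper package import path k8s.io/kops/tests/e2e/kubetest2-kops/aws and AWS credentials for the default region (us-east-2); it mirrors what the deployer's Up() and Down() now do for the publicly readable discovery store, but is not part of the patch itself.

package main

import (
	"context"
	"log"

	"k8s.io/kops/tests/e2e/kubetest2-kops/aws" // assumed import path for the e2e helper package
)

func main() {
	ctx := context.Background()

	client, err := aws.NewClient(ctx)
	if err != nil {
		log.Fatalf("creating AWS client: %v", err)
	}

	// Bucket names now encode the bucket type, e.g.
	// "k8s-infra-kops-discovery-<prow-job-or-account-id>-<timestamp>", capped at 63 characters.
	bucket, err := client.BucketName(ctx, aws.BucketTypeDiscoveryStore)
	if err != nil {
		log.Fatalf("generating bucket name: %v", err)
	}

	// publicRead=true relaxes the public access block and applies a public-read policy,
	// so the discovery documents stored here can be fetched anonymously.
	if err := client.EnsureS3Bucket(ctx, bucket, true); err != nil {
		log.Fatalf("ensuring bucket %s: %v", bucket, err)
	}

	// DeleteS3Bucket accepts an s3:// URL, empties the bucket via paginated
	// DeleteObjects calls, and then deletes the bucket itself.
	if err := client.DeleteS3Bucket(ctx, "s3://"+bucket); err != nil {
		log.Fatalf("deleting bucket %s: %v", bucket, err)
	}
}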