diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index fcc9cb145..d7884ce24 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -2488,6 +2488,33 @@ In this case, you might want to try disabling this option.
 			Help:     "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
 			Provider: "AWS",
 			Advanced: true,
+		}, {
+			Name: "use_already_exists",
+			Help: strings.ReplaceAll(`Set if rclone should report BucketAlreadyExists errors on bucket creation.
+
+At some point during the evolution of the s3 protocol, AWS started
+returning an |AlreadyOwnedByYou| error when attempting to create a
+bucket that the user already owned, rather than a
+|BucketAlreadyExists| error.
+
+Unfortunately, exactly what has been implemented by s3 clones is a
+little inconsistent: some return |AlreadyOwnedByYou|, some return
+|BucketAlreadyExists| and some return no error at all.
+
+This is important to rclone because it ensures the bucket exists by
+creating it before quite a lot of operations (unless
+|--s3-no-check-bucket| is used).
+
+If rclone knows the provider can return |AlreadyOwnedByYou| or returns
+no error, then it can report |BucketAlreadyExists| errors when the user
+attempts to create a bucket not owned by them. Otherwise rclone
+ignores the |BucketAlreadyExists| error, which can lead to confusion.
+
+This should be automatically set correctly for all providers rclone
+knows about - please make a bug report if not.
+`, "|", "`"),
+			Default:  fs.Tristate{},
+			Advanced: true,
 		}},
 	})
 }
@@ -2614,6 +2641,7 @@ type Options struct {
 	MightGzip             fs.Tristate `config:"might_gzip"`
 	UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
 	NoSystemMetadata      bool        `config:"no_system_metadata"`
+	UseAlreadyExists      fs.Tristate `config:"use_already_exists"`
 }
 
 // Fs represents a remote s3 server
@@ -3012,6 +3040,7 @@ func setQuirks(opt *Options) {
 		useMultipartEtag      = true // Set if Etags for multipart uploads are compatible with AWS
 		useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
 		mightGzip             = true // assume all providers might use content encoding gzip until proven otherwise
+		useAlreadyExists      = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
 	)
 	switch opt.Provider {
 	case "AWS":
@@ -3019,18 +3048,22 @@
 		mightGzip = false // Never auto gzips objects
 	case "Alibaba":
 		useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
+		useAlreadyExists = true // returns 200 OK
 	case "HuaweiOBS":
 		// Huawei OBS PFS does not support ListObjectsV2, and if urlEncodeListings is turned on, the marker does not work, so listing keeps returning the same page forever.
 		urlEncodeListings = false
 		listObjectsV2 = false
+		useAlreadyExists = false // untested
 	case "Ceph":
 		listObjectsV2 = false
 		virtualHostStyle = false
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "ChinaMobile":
 		listObjectsV2 = false
 		virtualHostStyle = false
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "Cloudflare":
 		virtualHostStyle = false
 		useMultipartEtag = false // currently multipart Etags are random
@@ -3038,88 +3071,104 @@ func setQuirks(opt *Options) {
 		listObjectsV2 = false
 		virtualHostStyle = false
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "DigitalOcean":
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "Dreamhost":
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "IBMCOS":
 		listObjectsV2 = false // untested
 		virtualHostStyle = false
 		urlEncodeListings = false
 		useMultipartEtag = false // untested
+		useAlreadyExists = false // returns BucketAlreadyExists
 	case "IDrive":
 		virtualHostStyle = false
+		useAlreadyExists = false // untested
 	case "IONOS":
 		// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
 		virtualHostStyle = false
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "Petabox":
-		// No quirks
+		useAlreadyExists = false // untested
 	case "Liara":
 		virtualHostStyle = false
 		urlEncodeListings = false
 		useMultipartEtag = false
+		useAlreadyExists = false // untested
 	case "Linode":
-		// No quirks
+		useAlreadyExists = true // returns 200 OK
 	case "LyveCloud":
 		useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
+		useAlreadyExists = false // untested
 	case "Minio":
 		virtualHostStyle = false
 	case "Netease":
 		listObjectsV2 = false // untested
 		urlEncodeListings = false
 		useMultipartEtag = false // untested
+		useAlreadyExists = false // untested
 	case "RackCorp":
 		// No quirks
 		useMultipartEtag = false // untested
+		useAlreadyExists = false // untested
 	case "Scaleway":
 		// Scaleway can only have 1000 parts in an upload
 		if opt.MaxUploadParts > 1000 {
 			opt.MaxUploadParts = 1000
 		}
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "SeaweedFS":
 		listObjectsV2 = false // untested
 		virtualHostStyle = false
 		urlEncodeListings = false
 		useMultipartEtag = false // untested
+		useAlreadyExists = false // untested
 	case "StackPath":
 		listObjectsV2 = false // untested
 		virtualHostStyle = false
 		urlEncodeListings = false
+		useAlreadyExists = false // untested
 	case "Storj":
 		// Force chunk size to >= 64 MiB
 		if opt.ChunkSize < 64*fs.Mebi {
 			opt.ChunkSize = 64 * fs.Mebi
 		}
+		useAlreadyExists = false // returns BucketAlreadyExists
 	case "Synology":
 		useMultipartEtag = false
+		useAlreadyExists = false // untested
 	case "TencentCOS":
 		listObjectsV2 = false // untested
 		useMultipartEtag = false // untested
+		useAlreadyExists = false // untested
 	case "Wasabi":
-		// No quirks
+		useAlreadyExists = true // returns 200 OK
 	case "Leviia":
-		// No quirks
+		useAlreadyExists = false // untested
 	case "Qiniu":
 		useMultipartEtag = false
 		urlEncodeListings = false
 		virtualHostStyle = false
+		useAlreadyExists = false // untested
 	case "GCS":
 		// Google breaks the request signature by mutating the Accept-Encoding HTTP header
 		// https://github.com/rclone/rclone/issues/6670
 		useAcceptEncodingGzip = false
+		useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
+	default:
+		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
+		fallthrough
 	case "Other":
 		listObjectsV2 = false
 		virtualHostStyle = false
 		urlEncodeListings = false
 		useMultipartEtag = false
-	default:
-		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
-		listObjectsV2 = false
-		virtualHostStyle = false
-		urlEncodeListings = false
-		useMultipartEtag = false
+		useAlreadyExists = false
 	}
 
 	// Path Style vs Virtual Host style
@@ -3159,6 +3208,12 @@ func setQuirks(opt *Options) {
 		opt.UseAcceptEncodingGzip.Valid = true
 		opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
 	}
+
+	// Does the provider return AlreadyOwnedByYou errors?
+	if !opt.UseAlreadyExists.Valid {
+		opt.UseAlreadyExists.Valid = true
+		opt.UseAlreadyExists.Value = useAlreadyExists
+	}
 }
 
 // setRoot changes the root of the Fs
@@ -4187,8 +4242,17 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 		fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
 	}
 	if awsErr, ok := err.(awserr.Error); ok {
-		if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
+		switch awsErr.Code() {
+		case "BucketAlreadyOwnedByYou":
 			err = nil
+		case "BucketAlreadyExists", "BucketNameUnavailable":
+			if f.opt.UseAlreadyExists.Value {
+				// We can trust BucketAlreadyExists to mean not owned by us, so make it non-retriable
+				err = fserrors.NoRetryError(err)
+			} else {
+				// We can't trust BucketAlreadyExists to mean not owned by us, so ignore it
+				err = nil
+			}
 		}
 	}
 	return err
diff --git a/backend/s3/s3_internal_test.go b/backend/s3/s3_internal_test.go
index f48045072..4e34b1c27 100644
--- a/backend/s3/s3_internal_test.go
+++ b/backend/s3/s3_internal_test.go
@@ -12,6 +12,7 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
@@ -393,6 +394,41 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 		}
 	})
 
+	t.Run("Mkdir", func(t *testing.T) {
+		// Test what happens when we create a bucket we already own and see whether the
+		// quirk is set correctly
+		req := s3.CreateBucketInput{
+			Bucket: &f.rootBucket,
+			ACL:    stringPointerOrNil(f.opt.BucketACL),
+		}
+		if f.opt.LocationConstraint != "" {
+			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
+				LocationConstraint: &f.opt.LocationConstraint,
+			}
+		}
+		err := f.pacer.Call(func() (bool, error) {
+			_, err := f.c.CreateBucketWithContext(ctx, &req)
+			return f.shouldRetry(ctx, err)
+		})
+		var errString string
+		if err == nil {
+			errString = "No Error"
+		} else if awsErr, ok := err.(awserr.Error); ok {
+			errString = awsErr.Code()
+		} else {
+			assert.Fail(t, "Unknown error %T %v", err, err)
+		}
+		t.Logf("Creating a bucket we already have created returned code: %s", errString)
+		switch errString {
+		case "BucketAlreadyExists":
+			assert.False(t, f.opt.UseAlreadyExists.Value, "Need to clear UseAlreadyExists quirk")
+		case "No Error", "BucketAlreadyOwnedByYou":
+			assert.True(t, f.opt.UseAlreadyExists.Value, "Need to set UseAlreadyExists quirk")
+		default:
+			assert.Fail(t, "Unknown error string %q", errString)
+		}
+	})
+
 	t.Run("Cleanup", func(t *testing.T) {
 		require.NoError(t, f.CleanUpHidden(ctx))
 		items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
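
For reviewers, here is a minimal standalone sketch (not part of the patch) of the decision table the new `use_already_exists` quirk drives in `makeBucket`. The `classifyCreateBucket` helper is hypothetical and returns strings purely for illustration; the real code above clears the error or wraps it in `fserrors.NoRetryError` instead.

```go
package main

import "fmt"

// classifyCreateBucket mirrors the switch added to makeBucket above:
// given the error code returned by CreateBucket and the value of the
// use_already_exists quirk, decide what rclone should do.
func classifyCreateBucket(code string, useAlreadyExists bool) string {
	switch code {
	case "":
		return "success" // bucket created (or provider returned 200 OK on remake)
	case "BucketAlreadyOwnedByYou":
		return "ignore" // we already own the bucket, so treat creation as a success
	case "BucketAlreadyExists", "BucketNameUnavailable":
		if useAlreadyExists {
			// Provider is known to distinguish "owned by you" from "owned by
			// someone else", so this is a genuine, permanent error.
			return "fail without retrying"
		}
		// Provider may return this even for a bucket we own - ignore it.
		return "ignore"
	}
	return "propagate" // any other error is handled as usual
}

func main() {
	for _, c := range []struct {
		code  string
		quirk bool
	}{
		{"BucketAlreadyOwnedByYou", true},
		{"BucketAlreadyExists", true},  // e.g. AWS: name taken by someone else
		{"BucketAlreadyExists", false}, // e.g. IBMCOS: could still be our own bucket
		{"", true},                     // e.g. Wasabi/Linode: 200 OK on remake
	} {
		fmt.Printf("code=%-26q quirk=%-5v -> %s\n", c.code, c.quirk, classifyCreateBucket(c.code, c.quirk))
	}
}
```

The design point the table captures: `err = nil` is only safe when the provider might return `BucketAlreadyExists` for a bucket the user already owns; where the provider is known to distinguish ownership, surfacing a non-retriable error tells the user the bucket name is genuinely taken rather than silently carrying on.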