From fd9049c83d9ef181170d18467b16f35fc5f11599 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Sat, 3 Aug 2024 11:35:32 +0100
Subject: [PATCH] s3: update to using AWS SDK v2 - fixes #4989

SDK v2 conversion

Changes
- `--s3-sts-endpoint` is no longer supported
- new `--s3-use-unsigned-payload` flag to control the use of unsigned
  payloads instead of trailer checksums (needed for non-AWS providers)
---
 backend/s3/gen_setfrom.go      |  12 +-
 backend/s3/s3.go               | 863 +++++++++++++++------------------
 backend/s3/s3_internal_test.go |  73 +--
 backend/s3/s3_test.go          |  20 +-
 backend/s3/setfrom.go          |  34 +-
 backend/s3/v2sign.go           |  22 +-
 docs/content/s3.md             |  29 +-
 go.mod                         |  19 +
 go.sum                         |  38 ++
 9 files changed, 558 insertions(+), 552 deletions(-)

diff --git a/backend/s3/gen_setfrom.go b/backend/s3/gen_setfrom.go
index 9dd037086..4748b4da0 100644
--- a/backend/s3/gen_setfrom.go
+++ b/backend/s3/gen_setfrom.go
@@ -13,7 +13,8 @@ import (
 	"reflect"
 	"strings"

-	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 )

 // flags
@@ -82,15 +83,18 @@ func main() {

 package s3

-import "github.com/aws/aws-sdk-go/service/s3"
+import (
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
 `)

 	genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
 	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
 	genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
-	genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
+	genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry))
 	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
-	genSetFrom(new(s3.Object), new(s3.ObjectVersion))
+	genSetFrom(new(types.Object), new(types.ObjectVersion))
 	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
 	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
 	genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index cb8baf134..07f7e6d3c 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -3,8 +3,18 @@ package s3

 //go:generate go run gen_setfrom.go -o setfrom.go

+/*
+   SDK v2 conversion
+
+   Changes
+   - `--s3-sts-endpoint` is no longer supported
+
+   Untested
+   - v2auth
+   - fancy auth in EC2, Kubernetes etc.
+*/
+
 import (
-	"bytes"
 	"context"
 	"crypto/md5"
 	"crypto/tls"
@@ -26,19 +36,17 @@ import (
 	"sync"
 	"time"

-	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awsconfig "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"

-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/corehandlers"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
-	"github.com/aws/aws-sdk-go/aws/defaults"
-	"github.com/aws/aws-sdk-go/aws/ec2metadata"
-	"github.com/aws/aws-sdk-go/aws/endpoints"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/ncw/swift/v2"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -2440,6 +2448,18 @@ object
ending with "/", to persist the folder. Name: "use_multipart_etag", Help: `Whether to use ETag in multipart uploads for verification +This should be true, false or left unset to use the default for the provider. +`, + Default: fs.Tristate{}, + Advanced: true, + }, { + Name: "use_unsigned_payload", + Help: `Whether to use an unsigned payload in PutObject + +Rclone has to avoid the AWS SDK seeking the body when calling +PutObject. The AWS provider can add checksums in the trailer to avoid +seeking but other providers can't. + This should be true, false or left unset to use the default for the provider. `, Default: fs.Tristate{}, @@ -2553,9 +2573,10 @@ In this case, you might want to try disabling this option. Default: false, }, { Name: "sts_endpoint", - Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.", + Help: "Endpoint for STS (deprecated).\n\nLeave blank if using AWS to use the default endpoint for the region.", Provider: "AWS", Advanced: true, + Hide: fs.OptionHideBoth, }, { Name: "use_already_exists", Help: strings.ReplaceAll(`Set if rclone should report BucketAlreadyExists errors on bucket creation. @@ -2704,7 +2725,7 @@ type Options struct { V2Auth bool `config:"v2_auth"` UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"` LeavePartsOnError bool `config:"leave_parts_on_error"` - ListChunk int64 `config:"list_chunk"` + ListChunk int32 `config:"list_chunk"` ListVersion int `config:"list_version"` ListURLEncode fs.Tristate `config:"list_url_encode"` NoCheckBucket bool `config:"no_check_bucket"` @@ -2725,25 +2746,25 @@ type Options struct { NoSystemMetadata bool `config:"no_system_metadata"` UseAlreadyExists fs.Tristate `config:"use_already_exists"` UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"` + UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"` } // Fs represents a remote s3 server type Fs struct { - name string // the name of the remote - root string // root of the bucket - ignore all objects above this - opt Options // parsed options - ci *fs.ConfigInfo // global config - ctx context.Context // global context for reading config - features *fs.Features // optional features - c *s3.S3 // the connection to the s3 server - ses *session.Session // the s3 session - rootBucket string // bucket part of root (if any) - rootDirectory string // directory part of root (if any) - cache *bucket.Cache // cache for bucket creation status - pacer *fs.Pacer // To pace the API calls - srv *http.Client // a plain http client - srvRest *rest.Client // the rest connection to the server - etagIsNotMD5 bool // if set ETags are not MD5s + name string // the name of the remote + root string // root of the bucket - ignore all objects above this + opt Options // parsed options + ci *fs.ConfigInfo // global config + ctx context.Context // global context for reading config + features *fs.Features // optional features + c *s3.Client // the connection to the s3 server + rootBucket string // bucket part of root (if any) + rootDirectory string // directory part of root (if any) + cache *bucket.Cache // cache for bucket creation status + pacer *fs.Pacer // To pace the API calls + srv *http.Client // a plain http client + srvRest *rest.Client // the rest connection to the server + etagIsNotMD5 bool // if set ETags are not MD5s versioningMu sync.Mutex versioning fs.Tristate // if set bucket is using versions warnCompressed sync.Once // warn once about compressed files @@ -2772,6 +2793,24 @@ type Object struct { contentLanguage 
*string // Content-Language: header } +// safely dereference the pointer, returning a zero T if nil +func deref[T any](p *T) T { + if p == nil { + var zero T + return zero + } + return *p +} + +// gets an http status code from err or returns -1 +func getHTTPStatusCode(err error) int { + var httpErr interface{ HTTPStatusCode() int } + if errors.As(err, &httpErr) { + return httpErr.HTTPStatusCode() + } + return -1 +} + // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) @@ -2815,33 +2854,35 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) { if fserrors.ContextError(ctx, &err) { return false, err } + // https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md#error-handling // If this is an awserr object, try and extract more useful information to determine if we should retry - if awsError, ok := err.(awserr.Error); ok { + var awsError smithy.APIError + if errors.As(err, &awsError) { // Simple case, check the original embedded error in case it's generically retryable - if fserrors.ShouldRetry(awsError.OrigErr()) { + if fserrors.ShouldRetry(awsError) { return true, err } // If it is a timeout then we want to retry that - if awsError.Code() == "RequestTimeout" { + if awsError.ErrorCode() == "RequestTimeout" { return true, err } - // Failing that, if it's a RequestFailure it's probably got an http status code we can check - if reqErr, ok := err.(awserr.RequestFailure); ok { - // 301 if wrong region for bucket - can only update if running from a bucket - if f.rootBucket != "" { - if reqErr.StatusCode() == http.StatusMovedPermanently { - urfbErr := f.updateRegionForBucket(ctx, f.rootBucket) - if urfbErr != nil { - fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr) - return false, err - } - return true, err + } + // Check http status code if available + if httpStatusCode := getHTTPStatusCode(err); httpStatusCode > 0 { + // 301 if wrong region for bucket - can only update if running from a bucket + if f.rootBucket != "" { + if httpStatusCode == http.StatusMovedPermanently { + urfbErr := f.updateRegionForBucket(ctx, f.rootBucket) + if urfbErr != nil { + fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr) + return false, err } + return true, err } - for _, e := range retryErrorCodes { - if reqErr.StatusCode() == e { - return true, err - } + } + for _, e := range retryErrorCodes { + if httpStatusCode == e { + return true, err } } } @@ -2890,164 +2931,94 @@ func getClient(ctx context.Context, opt *Options) *http.Client { } } -// Default name resolver -var defaultResolver = endpoints.DefaultResolver() - -// resolve (service, region) to endpoint -// -// Used to set endpoint for s3 services and not for other services -type resolver map[string]string - -// Add a service to the resolver, ignoring empty urls -func (r resolver) addService(service, url string) { - if url == "" { - return - } - if !strings.HasPrefix(url, "http") { - url = "https://" + url - } - r[service] = url -} - -// EndpointFor return the endpoint for s3 if set or the default if not -func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { - fs.Debugf(nil, "Resolving service %q region %q", service, region) - url, ok := r[service] - if ok { - return endpoints.ResolvedEndpoint{ - URL: url, - SigningRegion: region, - }, nil - } - return defaultResolver.EndpointFor(service, region, opts...) 
-} - // s3Connection makes a connection to s3 -func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) { +func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) { ci := fs.GetConfig(ctx) - // Make the auth - v := credentials.Value{ - AccessKeyID: opt.AccessKeyID, - SecretAccessKey: opt.SecretAccessKey, - SessionToken: opt.SessionToken, - } + var awsConfig aws.Config - lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service - - def := defaults.Get() - def.Config.HTTPClient = lowTimeoutClient - - // start a new AWS session - awsSession, err := session.NewSession() - if err != nil { - return nil, nil, fmt.Errorf("NewSession: %w", err) - } - - // first provider to supply a credential set "wins" - providers := []credentials.Provider{ - // use static credentials if they're present (checked by provider) - &credentials.StaticProvider{Value: v}, - - // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY - // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY - &credentials.EnvProvider{}, - - // A SharedCredentialsProvider retrieves credentials - // from the current user's home directory. It checks - // AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too. - &credentials.SharedCredentialsProvider{ - Filename: opt.SharedCredentialsFile, // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. - Profile: opt.Profile, // If empty will look gor "AWS_PROFILE" env var or "default" if not set. - }, - - // Pick up IAM role if we're in an ECS task - defaults.RemoteCredProvider(*def.Config, def.Handlers), - - // Pick up IAM role in case we're on EC2 - &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.New(awsSession, &aws.Config{ - HTTPClient: lowTimeoutClient, - }), - ExpiryWindow: 3 * time.Minute, - }, - } - cred := credentials.NewChainCredentials(providers) - - switch { - case opt.EnvAuth: - // No need for empty checks if "env_auth" is true - case v.AccessKeyID == "" && v.SecretAccessKey == "": - // if no access key/secret and iam is explicitly disabled then fall back to anon interaction - cred = credentials.AnonymousCredentials - fs.Debugf(nil, "Using anonymous credentials - did you mean to set env_auth=true?") - case v.AccessKeyID == "": - return nil, nil, errors.New("access_key_id not found") - case v.SecretAccessKey == "": - return nil, nil, errors.New("secret_access_key not found") + // Try to fill in the config from the environment if env_auth=true + if opt.EnvAuth { + configOpts := []func(*awsconfig.LoadOptions) error{} + // Set the name of the profile if supplied + if opt.Profile != "" { + configOpts = append(configOpts, awsconfig.WithSharedConfigProfile(opt.Profile)) + } + // Set the shared config file if supplied + if opt.SharedCredentialsFile != "" { + configOpts = append(configOpts, awsconfig.WithSharedConfigFiles([]string{opt.SharedCredentialsFile})) + } + awsConfig, err = awsconfig.LoadDefaultConfig(ctx, configOpts...) 
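+		// NB: LoadDefaultConfig applies the SDK's standard resolution
+		// chain - environment variables, shared config/credentials files
+		// (honouring AWS_PROFILE), SSO, and EC2/ECS instance roles - so it
+		// replaces the hand-built provider list deleted above.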
+ if err != nil { + return nil, fmt.Errorf("couldn't load configuration with env_auth=true: %w", err) + } + } else { + switch { + case opt.AccessKeyID == "" && opt.SecretAccessKey == "": + // if no access key/secret and iam is explicitly disabled then fall back to anon interaction + awsConfig.Credentials = aws.AnonymousCredentials{} + fs.Debugf(nil, "Using anonymous credentials - did you mean to set env_auth=true?") + case opt.AccessKeyID == "": + return nil, errors.New("access_key_id not found") + case opt.SecretAccessKey == "": + return nil, errors.New("secret_access_key not found") + default: + // Make the static auth + v := aws.Credentials{ + AccessKeyID: opt.AccessKeyID, + SecretAccessKey: opt.SecretAccessKey, + SessionToken: opt.SessionToken, + } + awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v} + } } if opt.Region == "" { opt.Region = "us-east-1" } setQuirks(opt) - awsConfig := aws.NewConfig(). - WithMaxRetries(ci.LowLevelRetries). - WithCredentials(cred). - WithHTTPClient(client). - WithS3ForcePathStyle(opt.ForcePathStyle). - WithS3UseAccelerate(opt.UseAccelerateEndpoint). - WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint) + awsConfig.RetryMaxAttempts = ci.LowLevelRetries + awsConfig.HTTPClient = client + + options := []func(*s3.Options){} + options = append(options, func(s3Opt *s3.Options) { + s3Opt.UsePathStyle = opt.ForcePathStyle + s3Opt.UseAccelerate = opt.UseAccelerateEndpoint + // FIXME maybe this should be a tristate so can default to DualStackEndpointStateUnset? + if opt.UseDualStack { + s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled + } else { + s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled + } + // FIXME not ported from SDK v1 - not sure what this does + // s3Opt.UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) if opt.Region != "" { - awsConfig.WithRegion(opt.Region) + awsConfig.Region = opt.Region } - if opt.Endpoint != "" || opt.STSEndpoint != "" { - // If endpoints are set, override the relevant services only - r := make(resolver) - r.addService("s3", opt.Endpoint) - r.addService("sts", opt.STSEndpoint) - awsConfig.WithEndpointResolver(r) + if opt.STSEndpoint != "" { + // FIXME not sure if anyone is using this + // Haven't figured out how to do it with the v2 SDK + return nil, errors.New("--s3-sts-endpoint is no longer supported with the v2 SDK - please make an issue") } - if opt.UseDualStack { - awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled + if opt.Endpoint != "" { + if !strings.HasPrefix(opt.Endpoint, "http") { + opt.Endpoint = "https://" + opt.Endpoint + } + options = append(options, func(s3Opt *s3.Options) { + s3Opt.BaseEndpoint = &opt.Endpoint + }) } - // awsConfig.WithLogLevel(aws.LogDebugWithSigning) - awsSessionOpts := session.Options{ - Config: *awsConfig, - } - if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" { - // Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env) - awsSessionOpts.SharedConfigState = session.SharedConfigEnable - // Set the name of the profile if supplied - awsSessionOpts.Profile = opt.Profile - // Set the shared config file if supplied - if opt.SharedCredentialsFile != "" { - awsSessionOpts.SharedConfigFiles = []string{opt.SharedCredentialsFile} - } - // The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source - // (from the shared config file) if the passed-in Options.Config.Credentials is 
nil.
-		awsSessionOpts.Config.Credentials = nil
-	}
-	ses, err := session.NewSessionWithOptions(awsSessionOpts)
-	if err != nil {
-		return nil, nil, err
-	}
-	c := s3.New(ses)
 	if opt.V2Auth || opt.Region == "other-v2-signature" {
 		fs.Debugf(nil, "Using v2 auth")
-		signer := func(req *request.Request) {
-			// Ignore AnonymousCredentials object
-			if req.Config.Credentials == credentials.AnonymousCredentials {
-				return
-			}
-			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
-		}
-		c.Handlers.Sign.Clear()
-		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
-		c.Handlers.Sign.PushBack(signer)
+		options = append(options, func(s3Opt *s3.Options) {
+			s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
+		})
 	}
-	return c, ses, nil
+
+	c := s3.NewFromConfig(awsConfig, options...)
+	return c, nil
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
@@ -3150,11 +3121,13 @@ func setQuirks(opt *Options) {
 		mightGzip           = true // assume all providers might use content encoding gzip until proven otherwise
 		useAlreadyExists    = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
 		useMultipartUploads = true // Set if provider supports multipart uploads
+		useUnsignedPayload  = true // Set if we need to use unsigned payloads to avoid seeking in PutObject
 	)
 	switch opt.Provider {
 	case "AWS":
 		// No quirks
-		mightGzip = false // Never auto gzips objects
+		mightGzip = false          // Never auto gzips objects
+		useUnsignedPayload = false // AWS has trailer support which means it adds checksums in the trailer without seeking
 	case "Alibaba":
 		useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
 		useAlreadyExists = true  // returns 200 OK
@@ -3354,6 +3327,11 @@
 		opt.UploadCutoff = math.MaxInt64
 	}
+	// Set the correct use unsigned payload if not manually set
+	if !opt.UseUnsignedPayload.Valid {
+		opt.UseUnsignedPayload.Valid = true
+		opt.UseUnsignedPayload.Value = useUnsignedPayload
+	}
 }

 // setRoot changes the root of the Fs
@@ -3362,14 +3340,6 @@ func (f *Fs) setRoot(root string) {
 	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
 }

-// return a pointer to the string if non empty or nil if it is empty
-func stringPointerOrNil(s string) *string {
-	if s == "" {
-		return nil
-	}
-	return &s
-}
-
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -3412,7 +3382,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
 	}
 	srv := getClient(ctx, opt)
-	c, ses, err := s3Connection(ctx, opt, srv)
+	c, err := s3Connection(ctx, opt, srv)
 	if err != nil {
 		return nil, err
 	}
@@ -3430,7 +3400,6 @@
 		ci:      ci,
 		ctx:     ctx,
 		c:       c,
-		ses:     ses,
 		pacer:   pc,
 		cache:   bucket.NewCache(),
 		srv:     srv,
@@ -3497,7 +3466,7 @@
 // This is needed to find versioned objects from their paths.
// // It may return info == nil and err == nil if a HEAD would be more appropriate -func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s3.Object, versionID *string, err error) { +func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *types.Object, versionID *string, err error) { bucket, bucketPath := f.split(wantRemote) // Strip the version string off if using versions @@ -3519,7 +3488,7 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s findFile: true, versionAt: f.opt.VersionAt, hidden: f.opt.VersionDeleted, - }, func(gotRemote string, object *s3.Object, objectVersionID *string, isDirectory bool) error { + }, func(gotRemote string, object *types.Object, objectVersionID *string, isDirectory bool) error { if isDirectory { return nil } @@ -3542,6 +3511,14 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s return info, versionID, nil } +// stringClone clones the string s into new memory. This is useful to +// stop us keeping references to small strings carved out of large XML +// responses. +func stringClone(s string) *string { + var sNew = strings.Clone(s) + return &sNew +} + // stringClonePointer clones the string pointed to by sp into new // memory. This is useful to stop us keeping references to small // strings carved out of large XML responses. @@ -3549,14 +3526,14 @@ func stringClonePointer(sp *string) *string { if sp == nil { return nil } - var s = *sp + var s = strings.Clone(*sp) return &s } // Return an Object from a path // // If it can't be found it returns the error ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object, versionID *string) (obj fs.Object, err error) { +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *types.Object, versionID *string) (obj fs.Object, err error) { o := &Object{ fs: f, remote: remote, @@ -3576,9 +3553,9 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje } else { o.lastModified = *info.LastModified } - o.setMD5FromEtag(aws.StringValue(info.ETag)) - o.bytes = aws.Int64Value(info.Size) - o.storageClass = stringClonePointer(info.StorageClass) + o.setMD5FromEtag(deref(info.ETag)) + o.bytes = deref(info.Size) + o.storageClass = stringClone(string(info.StorageClass)) o.versionID = stringClonePointer(versionID) // If is delete marker, show that metadata has been read as there is none to read if info.Size == isDeleteMarker { @@ -3601,9 +3578,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // Gets the bucket location func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) { - region, err := s3manager.GetBucketRegion(ctx, f.ses, bucket, "", func(r *request.Request) { - r.Config.S3ForcePathStyle = aws.Bool(f.opt.ForcePathStyle) - }) + region, err := manager.GetBucketRegion(ctx, f.c, bucket) if err != nil { return "", err } @@ -3617,22 +3592,21 @@ func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error { if err != nil { return fmt.Errorf("reading bucket location failed: %w", err) } - if aws.StringValue(f.c.Config.Endpoint) != "" { + if f.opt.Endpoint != "" { return fmt.Errorf("can't set region to %q as endpoint is set", region) } - if aws.StringValue(f.c.Config.Region) == region { + if f.opt.Region == region { return fmt.Errorf("region is already %q - not updating", region) } // Make a new session with the new region oldRegion := f.opt.Region f.opt.Region = 
region - c, ses, err := s3Connection(f.ctx, &f.opt, f.srv) + c, err := s3Connection(f.ctx, &f.opt, f.srv) if err != nil { return fmt.Errorf("creating new session failed: %w", err) } f.c = c - f.ses = ses fs.Logf(f, "Switched region to %q from %q", region, oldRegion) return nil @@ -3663,14 +3637,14 @@ func (f *Fs) newV1List(req *s3.ListObjectsV2Input) bucketLister { // List a bucket with V1 listing func (ls *v1List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) { - respv1, err := ls.f.c.ListObjectsWithContext(ctx, &ls.req) + respv1, err := ls.f.c.ListObjects(ctx, &ls.req) if err != nil { return nil, nil, err } // Set up the request for next time ls.req.Marker = respv1.NextMarker - if aws.BoolValue(respv1.IsTruncated) && ls.req.Marker == nil { + if deref(respv1.IsTruncated) && ls.req.Marker == nil { if len(respv1.Contents) == 0 { return nil, nil, errors.New("s3 protocol error: received listing v1 with IsTruncated set, no NextMarker and no Contents") } @@ -3680,7 +3654,7 @@ func (ls *v1List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi } // If we are URL encoding then must decode the marker - if ls.req.Marker != nil && ls.req.EncodingType != nil { + if ls.req.Marker != nil && ls.req.EncodingType == types.EncodingTypeUrl { *ls.req.Marker, err = url.QueryUnescape(*ls.req.Marker) if err != nil { return nil, nil, fmt.Errorf("failed to URL decode Marker %q: %w", *ls.req.Marker, err) @@ -3698,9 +3672,9 @@ func (ls *v1List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi // URL Encode the listings func (ls *v1List) URLEncodeListings(encode bool) { if encode { - ls.req.EncodingType = aws.String(s3.EncodingTypeUrl) + ls.req.EncodingType = types.EncodingTypeUrl } else { - ls.req.EncodingType = nil + ls.req.EncodingType = types.EncodingType("") } } @@ -3720,11 +3694,11 @@ func (f *Fs) newV2List(req *s3.ListObjectsV2Input) bucketLister { // Do a V2 listing func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) { - resp, err = ls.f.c.ListObjectsV2WithContext(ctx, &ls.req) + resp, err = ls.f.c.ListObjectsV2(ctx, &ls.req) if err != nil { return nil, nil, err } - if aws.BoolValue(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") { + if deref(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") { return nil, nil, errors.New("s3 protocol error: received listing v2 with IsTruncated set and no NextContinuationToken. 
Should you be using `--s3-list-version 1`?") } ls.req.ContinuationToken = resp.NextContinuationToken @@ -3734,9 +3708,9 @@ func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi // URL Encode the listings func (ls *v2List) URLEncodeListings(encode bool) { if encode { - ls.req.EncodingType = aws.String(s3.EncodingTypeUrl) + ls.req.EncodingType = types.EncodingTypeUrl } else { - ls.req.EncodingType = nil + ls.req.EncodingType = types.EncodingType("") } } @@ -3764,13 +3738,13 @@ func (f *Fs) newVersionsList(req *s3.ListObjectsV2Input, hidden bool, versionAt return l } -// Any s3.Object or s3.ObjectVersion with this as their Size are delete markers +// Any types.Object or types.ObjectVersion with this as their Size are delete markers var isDeleteMarker = new(int64) -// Compare two s3.ObjectVersions, sorted alphabetically by key with +// Compare two types.ObjectVersions, sorted alphabetically by key with // the newest first if the Keys match or the one with IsLatest set if // everything matches. -func versionLess(a, b *s3.ObjectVersion) bool { +func versionLess(a, b *types.ObjectVersion) bool { if a == nil || a.Key == nil || a.LastModified == nil { return true } @@ -3790,7 +3764,7 @@ func versionLess(a, b *s3.ObjectVersion) bool { if dt < 0 { return false } - if aws.BoolValue(a.IsLatest) { + if deref(a.IsLatest) { return true } return false @@ -3803,17 +3777,17 @@ func versionLess(a, b *s3.ObjectVersion) bool { // so we need to merge them back again // // We do this by converting the s3.DeleteEntry into -// s3.ObjectVersion with Size = isDeleteMarker to tell them apart +// types.ObjectVersion with Size = isDeleteMarker to tell them apart // // We then merge them back into the Versions in the correct order -func mergeDeleteMarkers(oldVersions []*s3.ObjectVersion, deleteMarkers []*s3.DeleteMarkerEntry) (newVersions []*s3.ObjectVersion) { - newVersions = make([]*s3.ObjectVersion, 0, len(oldVersions)+len(deleteMarkers)) +func mergeDeleteMarkers(oldVersions []types.ObjectVersion, deleteMarkers []types.DeleteMarkerEntry) (newVersions []types.ObjectVersion) { + newVersions = make([]types.ObjectVersion, 0, len(oldVersions)+len(deleteMarkers)) for _, deleteMarker := range deleteMarkers { - var obj = new(s3.ObjectVersion) + var obj types.ObjectVersion //structs.SetFrom(obj, deleteMarker) - setFrom_s3ObjectVersion_s3DeleteMarkerEntry(obj, deleteMarker) + setFrom_typesObjectVersion_typesDeleteMarkerEntry(&obj, &deleteMarker) obj.Size = isDeleteMarker - for len(oldVersions) > 0 && versionLess(oldVersions[0], obj) { + for len(oldVersions) > 0 && versionLess(&oldVersions[0], &obj) { newVersions = append(newVersions, oldVersions[0]) oldVersions = oldVersions[1:] } @@ -3826,7 +3800,7 @@ func mergeDeleteMarkers(oldVersions []*s3.ObjectVersion, deleteMarkers []*s3.Del // List a bucket with versions func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) { - respVersions, err := ls.f.c.ListObjectVersionsWithContext(ctx, &ls.req) + respVersions, err := ls.f.c.ListObjectVersions(ctx, &ls.req) if err != nil { return nil, nil, err } @@ -3834,12 +3808,12 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output, // Set up the request for next time ls.req.KeyMarker = respVersions.NextKeyMarker ls.req.VersionIdMarker = respVersions.NextVersionIdMarker - if aws.BoolValue(respVersions.IsTruncated) && ls.req.KeyMarker == nil { + if deref(respVersions.IsTruncated) && ls.req.KeyMarker == nil { return nil, nil, 
errors.New("s3 protocol error: received versions listing with IsTruncated set with no NextKeyMarker") } // If we are URL encoding then must decode the marker - if ls.req.KeyMarker != nil && ls.req.EncodingType != nil { + if ls.req.KeyMarker != nil && ls.req.EncodingType == types.EncodingTypeUrl { *ls.req.KeyMarker, err = url.QueryUnescape(*ls.req.KeyMarker) if err != nil { return nil, nil, fmt.Errorf("failed to URL decode KeyMarker %q: %w", *ls.req.KeyMarker, err) @@ -3851,16 +3825,16 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output, //structs.SetFrom(resp, respVersions) setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(resp, respVersions) - // Merge in delete Markers as s3.ObjectVersion if we need them + // Merge in delete Markers as types.ObjectVersion if we need them if ls.hidden || ls.usingVersionAt { respVersions.Versions = mergeDeleteMarkers(respVersions.Versions, respVersions.DeleteMarkers) } - // Convert the Versions and the DeleteMarkers into an array of s3.Object + // Convert the Versions and the DeleteMarkers into an array of types.Object // // These are returned in the order that they are stored with the most recent first. // With the annoyance that the Versions and DeleteMarkers are split into two - objs := make([]*s3.Object, 0, len(respVersions.Versions)) + objs := make([]types.Object, 0, len(respVersions.Versions)) for _, objVersion := range respVersions.Versions { if ls.usingVersionAt { if objVersion.LastModified.After(ls.versionAt) { @@ -3877,11 +3851,11 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output, if !ls.hidden && objVersion.Size == isDeleteMarker { continue } - var obj = new(s3.Object) + var obj types.Object //structs.SetFrom(obj, objVersion) - setFrom_s3Object_s3ObjectVersion(obj, objVersion) + setFrom_typesObject_typesObjectVersion(&obj, &objVersion) // Adjust the file names - if !ls.usingVersionAt && (!aws.BoolValue(objVersion.IsLatest) || objVersion.Size == isDeleteMarker) { + if !ls.usingVersionAt && (!deref(objVersion.IsLatest) || objVersion.Size == isDeleteMarker) { if obj.Key != nil && objVersion.LastModified != nil { *obj.Key = version.Add(*obj.Key, *objVersion.LastModified) } @@ -3897,14 +3871,14 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output, // URL Encode the listings func (ls *versionsList) URLEncodeListings(encode bool) { if encode { - ls.req.EncodingType = aws.String(s3.EncodingTypeUrl) + ls.req.EncodingType = types.EncodingTypeUrl } else { - ls.req.EncodingType = nil + ls.req.EncodingType = types.EncodingType("") } } // listFn is called from list to handle an object. -type listFn func(remote string, object *s3.Object, versionID *string, isDirectory bool) error +type listFn func(remote string, object *types.Object, versionID *string, isDirectory bool) error // errEndList is a sentinel used to end the list iteration now. // listFn should return it to end the iteration with no errors. 
@@ -3963,11 +3937,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { MaxKeys: &f.opt.ListChunk, } if opt.restoreStatus { - restoreStatus := "RestoreStatus" - req.OptionalObjectAttributes = []*string{&restoreStatus} + req.OptionalObjectAttributes = []types.OptionalObjectAttributes{types.OptionalObjectAttributesRestoreStatus} } if f.opt.RequesterPays { - req.RequestPayer = aws.String(s3.RequestPayerRequester) + req.RequestPayer = types.RequestPayerRequester } var listBucket bucketLister switch { @@ -3984,37 +3957,31 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { var err error var versionIDs []*string err = f.pacer.Call(func() (bool, error) { + listBucket.URLEncodeListings(urlEncodeListings) resp, versionIDs, err = listBucket.List(ctx) if err != nil && !urlEncodeListings { - if awsErr, ok := err.(awserr.RequestFailure); ok { - if origErr := awsErr.OrigErr(); origErr != nil { - if _, ok := origErr.(*xml.SyntaxError); ok { - // Retry the listing with URL encoding as there were characters that XML can't encode - urlEncodeListings = true - fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded") - return true, err - } - } + var xmlErr *xml.SyntaxError + if errors.As(err, &xmlErr) { + // Retry the listing with URL encoding as there were characters that XML can't encode + urlEncodeListings = true + fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded") + return true, err } } return f.shouldRetry(ctx, err) }) if err != nil { - if awsErr, ok := err.(awserr.RequestFailure); ok { - if awsErr.StatusCode() == http.StatusNotFound { - err = fs.ErrorDirNotFound - } + if getHTTPStatusCode(err) == http.StatusNotFound { + err = fs.ErrorDirNotFound } if f.rootBucket == "" { // if listing from the root ignore wrong region requests returning // empty directory - if reqErr, ok := err.(awserr.RequestFailure); ok { - // 301 if wrong region for bucket - if reqErr.StatusCode() == http.StatusMovedPermanently { - fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", opt.bucket) - return nil - } + // 301 if wrong region for bucket + if getHTTPStatusCode(err) == http.StatusMovedPermanently { + fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", opt.bucket) + return nil } } return err @@ -4044,7 +4011,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { remote = bucket.Join(opt.bucket, remote) } remote = strings.TrimSuffix(remote, "/") - err = fn(remote, &s3.Object{Key: &remote}, nil, true) + err = fn(remote, &types.Object{Key: &remote}, nil, true) if err != nil { if err == errEndList { return nil @@ -4055,11 +4022,11 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { } foundItems += len(resp.Contents) for i, object := range resp.Contents { - remote := aws.StringValue(object.Key) + remote := deref(object.Key) if urlEncodeListings { remote, err = url.QueryUnescape(remote) if err != nil { - fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err) + fs.Logf(f, "failed to URL decode %q in listing: %v", deref(object.Key), err) continue } } @@ -4090,9 +4057,9 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { remote = bucket.Join(opt.bucket, remote) } if versionIDs != nil { - err = fn(remote, object, versionIDs[i], isDirectory) + err = fn(remote, &object, versionIDs[i], isDirectory) } else { - err = fn(remote, object, nil, isDirectory) + err = fn(remote, &object, nil, isDirectory) } if 
err != nil { if err == errEndList { @@ -4101,7 +4068,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { return err } } - if !aws.BoolValue(resp.IsTruncated) { + if !deref(resp.IsTruncated) { break } } @@ -4123,7 +4090,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error { } // Convert a list item into a DirEntry -func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, versionID *string, isDirectory bool) (fs.DirEntry, error) { +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *types.Object, versionID *string, isDirectory bool) (fs.DirEntry, error) { if isDirectory { size := int64(0) if object.Size != nil { @@ -4150,7 +4117,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB withVersions: f.opt.Versions, versionAt: f.opt.VersionAt, hidden: f.opt.VersionDeleted, - }, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error { + }, func(remote string, object *types.Object, versionID *string, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory) if err != nil { return err @@ -4173,16 +4140,16 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) req := s3.ListBucketsInput{} var resp *s3.ListBucketsOutput err = f.pacer.Call(func() (bool, error) { - resp, err = f.c.ListBucketsWithContext(ctx, &req) + resp, err = f.c.ListBuckets(ctx, &req) return f.shouldRetry(ctx, err) }) if err != nil { return nil, err } for _, bucket := range resp.Buckets { - bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name)) + bucketName := f.opt.Enc.ToStandardName(deref(bucket.Name)) f.cache.MarkOK(bucketName) - d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate)) + d := fs.NewDir(bucketName, deref(bucket.CreationDate)) entries = append(entries, d) } return entries, nil @@ -4237,7 +4204,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( withVersions: f.opt.Versions, versionAt: f.opt.VersionAt, hidden: f.opt.VersionDeleted, - }, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error { + }, func(remote string, object *types.Object, versionID *string, isDirectory bool) error { entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory) if err != nil { return err @@ -4297,16 +4264,14 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) { Bucket: &bucket, } err := f.pacer.Call(func() (bool, error) { - _, err := f.c.HeadBucketWithContext(ctx, &req) + _, err := f.c.HeadBucket(ctx, &req) return f.shouldRetry(ctx, err) }) if err == nil { return true, nil } - if err, ok := err.(awserr.RequestFailure); ok { - if err.StatusCode() == http.StatusNotFound { - return false, nil - } + if getHTTPStatusCode(err) == http.StatusNotFound { + return false, nil } return false, err } @@ -4385,22 +4350,23 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error { return f.cache.Create(bucket, func() error { req := s3.CreateBucketInput{ Bucket: &bucket, - ACL: stringPointerOrNil(f.opt.BucketACL), + ACL: types.BucketCannedACL(f.opt.BucketACL), } if f.opt.LocationConstraint != "" { - req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ - LocationConstraint: &f.opt.LocationConstraint, + req.CreateBucketConfiguration = &types.CreateBucketConfiguration{ + LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint), } } err := f.pacer.Call(func() (bool, error) { 
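		// NB: the v2 client drops the ...WithContext method variants;
		// every call now takes ctx as its first argument, as in the
		// change below (a pattern repeated throughout this patch).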
- _, err := f.c.CreateBucketWithContext(ctx, &req) + _, err := f.c.CreateBucket(ctx, &req) return f.shouldRetry(ctx, err) }) if err == nil { fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL) } - if awsErr, ok := err.(awserr.Error); ok { - switch awsErr.Code() { + var awsErr smithy.APIError + if errors.As(err, &awsErr) { + switch awsErr.ErrorCode() { case "BucketAlreadyOwnedByYou": err = nil case "BucketAlreadyExists", "BucketNameUnavailable": @@ -4444,7 +4410,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error { Bucket: &bucket, } err := f.pacer.Call(func() (bool, error) { - _, err := f.c.DeleteBucketWithContext(ctx, &req) + _, err := f.c.DeleteBucket(ctx, &req) return f.shouldRetry(ctx, err) }) if err == nil { @@ -4471,7 +4437,7 @@ func pathEscape(s string) string { // method func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error { req.Bucket = &dstBucket - req.ACL = stringPointerOrNil(f.opt.ACL) + req.ACL = types.ObjectCannedACL(f.opt.ACL) req.Key = &dstPath source := pathEscape(bucket.Join(srcBucket, srcPath)) if src.versionID != nil { @@ -4479,10 +4445,10 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa } req.CopySource = &source if f.opt.RequesterPays { - req.RequestPayer = aws.String(s3.RequestPayerRequester) + req.RequestPayer = types.RequestPayerRequester } if f.opt.ServerSideEncryption != "" { - req.ServerSideEncryption = &f.opt.ServerSideEncryption + req.ServerSideEncryption = types.ServerSideEncryption(f.opt.ServerSideEncryption) } if f.opt.SSECustomerAlgorithm != "" { req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm @@ -4499,15 +4465,15 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa if f.opt.SSEKMSKeyID != "" { req.SSEKMSKeyId = &f.opt.SSEKMSKeyID } - if req.StorageClass == nil && f.opt.StorageClass != "" { - req.StorageClass = &f.opt.StorageClass + if req.StorageClass == types.StorageClass("") && f.opt.StorageClass != "" { + req.StorageClass = types.StorageClass(f.opt.StorageClass) } if src.bytes >= int64(f.opt.CopyCutoff) { return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, src) } return f.pacer.Call(func() (bool, error) { - _, err := f.c.CopyObjectWithContext(ctx, req) + _, err := f.c.CopyObject(ctx, req) return f.shouldRetry(ctx, err) }) } @@ -4539,7 +4505,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst // If copy metadata was set then set the Metadata to that read // from the head request - if aws.StringValue(copyReq.MetadataDirective) == s3.MetadataDirectiveCopy { + if copyReq.MetadataDirective == types.MetadataDirectiveCopy { copyReq.Metadata = info.Metadata } @@ -4553,7 +4519,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst var cout *s3.CreateMultipartUploadOutput if err := f.pacer.Call(func() (bool, error) { var err error - cout, err = f.c.CreateMultipartUploadWithContext(ctx, req) + cout, err = f.c.CreateMultipartUpload(ctx, req) return f.shouldRetry(ctx, err) }); err != nil { return err @@ -4564,7 +4530,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst // Try to abort the upload, but ignore the error. 
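	// (context.Background() rather than ctx is used below, presumably so
	// the abort still runs even when the upload's own context has
	// already been cancelled.)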
fs.Debugf(src, "Cancelling multipart copy") _ = f.pacer.Call(func() (bool, error) { - _, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{ + _, err := f.c.AbortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ Bucket: &dstBucket, Key: &dstPath, UploadId: uid, @@ -4581,11 +4547,11 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst fs.Debugf(src, "Starting multipart copy with %d parts", numParts) var ( - parts = make([]*s3.CompletedPart, numParts) + parts = make([]types.CompletedPart, numParts) g, gCtx = errgroup.WithContext(ctx) ) g.SetLimit(f.opt.UploadConcurrency) - for partNum := int64(1); partNum <= numParts; partNum++ { + for partNum := int32(1); int64(partNum) <= numParts; partNum++ { // Fail fast, in case an errgroup managed function returns an error // gCtx is cancelled. There is no point in uploading all the other parts. if gCtx.Err() != nil { @@ -4601,15 +4567,15 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst uploadPartReq.Key = &dstPath uploadPartReq.PartNumber = &partNum uploadPartReq.UploadId = uid - uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)) + uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, int64(partNum-1), numParts, srcSize)) err := f.pacer.Call(func() (bool, error) { - uout, err = f.c.UploadPartCopyWithContext(gCtx, uploadPartReq) + uout, err = f.c.UploadPartCopy(gCtx, uploadPartReq) return f.shouldRetry(gCtx, err) }) if err != nil { return err } - parts[partNum-1] = &s3.CompletedPart{ + parts[partNum-1] = types.CompletedPart{ PartNumber: &partNum, ETag: uout.CopyPartResult.ETag, } @@ -4623,10 +4589,10 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst } return f.pacer.Call(func() (bool, error) { - _, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{ + _, err := f.c.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ Bucket: &dstBucket, Key: &dstPath, - MultipartUpload: &s3.CompletedMultipartUpload{ + MultipartUpload: &types.CompletedMultipartUpload{ Parts: parts, }, RequestPayer: req.RequestPayer, @@ -4662,7 +4628,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, srcBucket, srcPath := srcObj.split() req := s3.CopyObjectInput{ - MetadataDirective: aws.String(s3.MetadataDirectiveCopy), + MetadataDirective: types.MetadataDirectiveCopy, } // Update the metadata if it is in use @@ -4672,7 +4638,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, return nil, fmt.Errorf("failed to prepare upload: %w", err) } setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req) - req.MetadataDirective = aws.String(s3.MetadataDirectiveReplace) + req.MetadataDirective = types.MetadataDirectiveReplace } err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj) @@ -4701,14 +4667,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, fs.Logf(f, "Public Link: Reducing expiry to %v as %v is greater than the max time allowed", maxExpireDuration, expire) expire = maxExpireDuration } - bucket, bucketPath := o.split() - httpReq, _ := f.c.GetObjectRequest(&s3.GetObjectInput{ + bucket, bucketPath := f.split(remote) + httpReq, err := s3.NewPresignClient(f.c).PresignGetObject(ctx, &s3.GetObjectInput{ Bucket: &bucket, Key: &bucketPath, VersionId: o.versionID, - }) - - return 
httpReq.Presign(time.Duration(expire))
+	}, s3.WithPresignExpires(time.Duration(expire)))
+	if err != nil {
+		return "", err
+	}
+	return httpReq.URL, nil
 }

 var commandHelp = []fs.CommandHelp{{
@@ -4906,18 +4874,19 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 		req := s3.RestoreObjectInput{
 			//Bucket: &f.rootBucket,
 			//Key: &encodedDirectory,
-			RestoreRequest: &s3.RestoreRequest{},
+			RestoreRequest: &types.RestoreRequest{},
 		}
 		if lifetime := opt["lifetime"]; lifetime != "" {
 			ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
 			if err != nil {
 				return nil, fmt.Errorf("bad lifetime: %w", err)
 			}
+			ilifetime32 := int32(ilifetime)
-			req.RestoreRequest.Days = &ilifetime
+			req.RestoreRequest.Days = &ilifetime32
 		}
 		if priority := opt["priority"]; priority != "" {
-			req.RestoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{
-				Tier: &priority,
+			req.RestoreRequest.GlacierJobParameters = &types.GlacierJobParameters{
+				Tier: types.Tier(priority),
 			}
 		}
 		if description := opt["description"]; description != "" {
@@ -4957,7 +4926,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 			reqCopy.Key = &bucketPath
 			reqCopy.VersionId = o.versionID
 			err = f.pacer.Call(func() (bool, error) {
-				_, err = f.c.RestoreObject(&reqCopy)
+				_, err = f.c.RestoreObject(ctx, &reqCopy)
 				return f.shouldRetry(ctx, err)
 			})
 			if err != nil {
@@ -4992,12 +4961,11 @@
 		if err != nil {
 			return nil, fmt.Errorf("reading config: %w", err)
 		}
-		c, ses, err := s3Connection(f.ctx, &newOpt, f.srv)
+		c, err := s3Connection(f.ctx, &newOpt, f.srv)
 		if err != nil {
 			return nil, fmt.Errorf("updating session: %w", err)
 		}
 		f.c = c
-		f.ses = ses
 		f.opt = newOpt
 		keys := []string{}
 		for k := range opt {
@@ -5014,8 +4982,8 @@
 type restoreStatusOut struct {
 	Remote        string
 	VersionID     *string
-	RestoreStatus *s3.RestoreStatus
-	StorageClass  *string
+	RestoreStatus *types.RestoreStatus
+	StorageClass  types.ObjectStorageClass
 }

 // Recursively enumerate the current fs to find objects with a restore status
@@ -5033,7 +5001,7 @@ func (f *Fs) restoreStatus(ctx context.Context, all bool) (out []restoreStatusOu
 		versionAt:     f.opt.VersionAt,
 		hidden:        f.opt.VersionDeleted,
 		restoreStatus: true,
-	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
+	}, func(remote string, object *types.Object, versionID *string, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
 		if err != nil {
 			return err
 		}
@@ -5063,12 +5031,12 @@
 // Note that rather lazily we treat key as a prefix so it matches
 // directories and objects.
This could surprise the user if they ask // for "dir" and it returns "dirKey" -func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) { +func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []types.MultipartUpload, err error) { var ( keyMarker *string uploadIDMarker *string ) - uploads = []*s3.MultipartUpload{} + uploads = []types.MultipartUpload{} for { req := s3.ListMultipartUploadsInput{ Bucket: &bucket, @@ -5079,14 +5047,14 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo } var resp *s3.ListMultipartUploadsOutput err = f.pacer.Call(func() (bool, error) { - resp, err = f.c.ListMultipartUploads(&req) + resp, err = f.c.ListMultipartUploads(ctx, &req) return f.shouldRetry(ctx, err) }) if err != nil { return nil, fmt.Errorf("list multipart uploads bucket %q key %q: %w", bucket, key, err) } uploads = append(uploads, resp.Uploads...) - if !aws.BoolValue(resp.IsTruncated) { + if !deref(resp.IsTruncated) { break } keyMarker = resp.NextKeyMarker @@ -5095,8 +5063,8 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo return uploads, nil } -func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*s3.MultipartUpload, err error) { - uploadsMap = make(map[string][]*s3.MultipartUpload) +func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]types.MultipartUpload, err error) { + uploadsMap = make(map[string][]types.MultipartUpload) bucket, directory := f.split("") if bucket != "" { uploads, err := f.listMultipartUploads(ctx, bucket, directory) @@ -5123,7 +5091,7 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string } // cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge -func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration, uploads []*s3.MultipartUpload) (err error) { +func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration, uploads []types.MultipartUpload) (err error) { fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge) for _, upload := range uploads { if upload.Initiated != nil && upload.Key != nil && upload.UploadId != nil { @@ -5139,7 +5107,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat UploadId: upload.UploadId, Key: upload.Key, } - _, abortErr := f.c.AbortMultipartUpload(&req) + _, abortErr := f.c.AbortMultipartUpload(ctx, &req) if abortErr != nil { err = fmt.Errorf("failed to remove %s: %w", what, abortErr) fs.Errorf(f, "%v", err) @@ -5184,7 +5152,7 @@ func (f *Fs) isVersioned(ctx context.Context) bool { // Pass no arguments to get, or pass "Enabled" or "Suspended" // // Updates f.versioning -func (f *Fs) setGetVersioning(ctx context.Context, arg ...string) (status string, err error) { +func (f *Fs) setGetVersioning(ctx context.Context, arg ...string) (status types.BucketVersioningStatus, err error) { if len(arg) > 1 { return "", errors.New("too many arguments") } @@ -5192,19 +5160,19 @@ func (f *Fs) setGetVersioning(ctx context.Context, arg ...string) (status string return "", errors.New("need a bucket") } if len(arg) == 1 { - var versioning = s3.VersioningConfiguration{ - Status: aws.String(arg[0]), + var versioning = types.VersioningConfiguration{ + Status: types.BucketVersioningStatus(arg[0]), } // Disabled is indicated by the parameter missing - if *versioning.Status == 
"Disabled" { - versioning.Status = aws.String("") + if versioning.Status == types.BucketVersioningStatus("Disabled") { + versioning.Status = types.BucketVersioningStatus("") } req := s3.PutBucketVersioningInput{ Bucket: &f.rootBucket, VersioningConfiguration: &versioning, } err := f.pacer.Call(func() (bool, error) { - _, err = f.c.PutBucketVersioningWithContext(ctx, &req) + _, err = f.c.PutBucketVersioning(ctx, &req) return f.shouldRetry(ctx, err) }) if err != nil { @@ -5216,7 +5184,7 @@ func (f *Fs) setGetVersioning(ctx context.Context, arg ...string) (status string } var resp *s3.GetBucketVersioningOutput err = f.pacer.Call(func() (bool, error) { - resp, err = f.c.GetBucketVersioningWithContext(ctx, &req) + resp, err = f.c.GetBucketVersioning(ctx, &req) return f.shouldRetry(ctx, err) }) f.versioning.Valid = true @@ -5225,11 +5193,11 @@ func (f *Fs) setGetVersioning(ctx context.Context, arg ...string) (status string fs.Errorf(f, "Failed to read versioning status, assuming unversioned: %v", err) return "", err } - if resp.Status == nil { + if len(resp.Status) == 0 { return "Unversioned", err } f.versioning.Value = true - return *resp.Status, err + return resp.Status, err } // CleanUp removes all pending multipart uploads older than 24 hours @@ -5283,7 +5251,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error { withVersions: versioned, hidden: true, noSkipMarkers: true, - }, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error { + }, func(remote string, object *types.Object, versionID *string, isDirectory bool) error { if isDirectory { return nil } @@ -5303,16 +5271,16 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error { if oldOnly && isCurrentVersion { // Check current version of the file if object.Size == isDeleteMarker { - fs.Debugf(remote, "Deleting current version (id %q) as it is a delete marker", aws.StringValue(versionID)) + fs.Debugf(remote, "Deleting current version (id %q) as it is a delete marker", deref(versionID)) delChan <- oi } else { - fs.Debugf(remote, "Not deleting current version %q", aws.StringValue(versionID)) + fs.Debugf(remote, "Not deleting current version %q", deref(versionID)) } } else { if object.Size == isDeleteMarker { - fs.Debugf(remote, "Deleting delete marker (id %q)", aws.StringValue(versionID)) + fs.Debugf(remote, "Deleting delete marker (id %q)", deref(versionID)) } else { - fs.Debugf(remote, "Deleting (id %q)", aws.StringValue(versionID)) + fs.Debugf(remote, "Deleting (id %q)", deref(versionID)) } delChan <- oi } @@ -5415,7 +5383,7 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.HeadObjectOutput, err error) { if f.opt.RequesterPays { - req.RequestPayer = aws.String(s3.RequestPayerRequester) + req.RequestPayer = types.RequestPayerRequester } if f.opt.SSECustomerAlgorithm != "" { req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm @@ -5428,14 +5396,12 @@ func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3. 
} err = f.pacer.Call(func() (bool, error) { var err error - resp, err = f.c.HeadObjectWithContext(ctx, req) + resp, err = f.c.HeadObject(ctx, req) return f.shouldRetry(ctx, err) }) if err != nil { - if awsErr, ok := err.(awserr.RequestFailure); ok { - if awsErr.StatusCode() == http.StatusNotFound || awsErr.StatusCode() == http.StatusMethodNotAllowed { - return nil, fs.ErrorObjectNotFound - } + if statusCode := getHTTPStatusCode(err); statusCode == http.StatusNotFound || statusCode == http.StatusMethodNotAllowed { + return nil, fs.ErrorObjectNotFound } return nil, err } @@ -5461,25 +5427,19 @@ func (o *Object) readMetaData(ctx context.Context) (err error) { return nil } -// Convert S3 metadata with pointers into a map[string]string -// while lowercasing the keys -func s3MetadataToMap(s3Meta map[string]*string) map[string]string { +// Convert S3 metadata into a map[string]string while lowercasing the +// keys +func s3MetadataToMap(s3Meta map[string]string) map[string]string { meta := make(map[string]string, len(s3Meta)) for k, v := range s3Meta { - if v != nil { - meta[strings.ToLower(k)] = *v - } + meta[strings.ToLower(k)] = v } return meta } // Convert our metadata back into S3 metadata -func mapToS3Metadata(meta map[string]string) map[string]*string { - s3Meta := make(map[string]*string, len(meta)) - for k, v := range meta { - s3Meta[k] = aws.String(v) - } - return s3Meta +func mapToS3Metadata(meta map[string]string) map[string]string { + return meta } func (o *Object) setMetaData(resp *s3.HeadObjectOutput) { @@ -5488,7 +5448,7 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) { if resp.ContentLength != nil { o.bytes = *resp.ContentLength } - o.setMD5FromEtag(aws.StringValue(resp.ETag)) + o.setMD5FromEtag(deref(resp.ETag)) o.meta = s3MetadataToMap(resp.Metadata) // Read MD5 from metadata if present if md5sumBase64, ok := o.meta[metaMD5Hash]; ok { @@ -5515,17 +5475,17 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) { o.lastModified = *resp.LastModified } } - o.mimeType = aws.StringValue(resp.ContentType) + o.mimeType = deref(resp.ContentType) // Set system metadata - o.storageClass = resp.StorageClass + o.storageClass = (*string)(&resp.StorageClass) o.cacheControl = resp.CacheControl o.contentDisposition = resp.ContentDisposition o.contentEncoding = resp.ContentEncoding o.contentLanguage = resp.ContentLanguage // If decompressing then size and md5sum are unknown - if o.fs.opt.Decompress && aws.StringValue(o.contentEncoding) == "gzip" { + if o.fs.opt.Decompress && deref(o.contentEncoding) == "gzip" { o.bytes = -1 o.md5 = "" } @@ -5576,10 +5536,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { req := s3.CopyObjectInput{ ContentType: aws.String(fs.MimeType(ctx, o)), // Guess the content type Metadata: mapToS3Metadata(o.meta), - MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in + MetadataDirective: types.MetadataDirectiveReplace, // replace metadata with that passed in } if o.fs.opt.RequesterPays { - req.RequestPayer = aws.String(s3.RequestPayerRequester) + req.RequestPayer = types.RequestPayerRequester } return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o) } @@ -5615,12 +5575,12 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err) } - metaData := make(map[string]*string) + metaData := make(map[string]string) for key, value := range 
resp.Header { key = strings.ToLower(key) if strings.HasPrefix(key, "x-amz-meta-") { metaKey := strings.TrimPrefix(key, "x-amz-meta-") - metaData[metaKey] = &value[0] + metaData[metaKey] = value[0] } } @@ -5642,7 +5602,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ContentEncoding: header("Content-Encoding"), ContentLanguage: header("Content-Language"), ContentType: header("Content-Type"), - StorageClass: header("X-Amz-Storage-Class"), + StorageClass: types.StorageClass(deref(header("X-Amz-Storage-Class"))), } o.setMetaData(&head) return resp.Body, err @@ -5662,7 +5622,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read VersionId: o.versionID, } if o.fs.opt.RequesterPays { - req.RequestPayer = aws.String(s3.RequestPayerRequester) + req.RequestPayer = types.RequestPayerRequester } if o.fs.opt.SSECustomerAlgorithm != "" { req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm @@ -5673,13 +5633,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read if o.fs.opt.SSECustomerKeyMD5 != "" { req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5 } - httpReq, resp := o.fs.c.GetObjectRequest(&req) + // httpReq, err := s3.NewPresignClient(o.fs.c).PresignGetObject(ctx, &req) + // if err != nil { + // return nil, err + // } fs.FixRangeOption(options, o.bytes) + var APIOptions []func(*middleware.Stack) error + // Override the automatic decompression in the transport to // download compressed files as-is if o.fs.opt.UseAcceptEncodingGzip.Value { - httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip") + APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip")) } for _, option := range options { @@ -5689,21 +5654,23 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read req.Range = &value case *fs.HTTPOption: key, value := option.Header() - httpReq.HTTPRequest.Header.Add(key, value) + APIOptions = append(APIOptions, smithyhttp.AddHeaderValue(key, value)) default: if option.Mandatory() { fs.Logf(o, "Unsupported mandatory option: %v", option) } } } + + var resp *s3.GetObjectOutput err = o.fs.pacer.Call(func() (bool, error) { var err error - httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx) - err = httpReq.Send() + resp, err = o.fs.c.GetObject(ctx, &req, s3.WithAPIOptions(APIOptions...)) return o.fs.shouldRetry(ctx, err) }) - if err, ok := err.(awserr.RequestFailure); ok { - if err.Code() == "InvalidObjectState" { + var awsError smithy.APIError + if errors.As(err, &awsError) { + if awsError.ErrorCode() == "InvalidObjectState" { return nil, fmt.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath) } } @@ -5734,7 +5701,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read o.setMetaData(&head) // Decompress body if necessary - if aws.StringValue(resp.ContentEncoding) == "gzip" { + if deref(resp.ContentEncoding) == "gzip" { if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) { return readers.NewGzipReader(resp.Body) } @@ -5758,7 +5725,7 @@ type s3ChunkWriter struct { uploadID *string multiPartUploadInput *s3.CreateMultipartUploadInput completedPartsMu sync.Mutex - completedParts []*s3.CompletedPart + completedParts []types.CompletedPart eTag string versionID string md5sMu sync.Mutex @@ -5811,7 +5778,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn var mOut *s3.CreateMultipartUploadOutput err = f.pacer.Call(func() (bool,
error) { - mOut, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq) + mOut, err = f.c.CreateMultipartUpload(ctx, &mReq) if err == nil { if mOut == nil { err = fserrors.RetryErrorf("internal error: no info from multipart upload") @@ -5833,7 +5800,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn key: mOut.Key, uploadID: mOut.UploadId, multiPartUploadInput: &mReq, - completedParts: make([]*s3.CompletedPart, 0), + completedParts: make([]types.CompletedPart, 0), ui: ui, o: o, } @@ -5847,10 +5814,10 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn } // add a part number and etag to the completed parts -func (w *s3ChunkWriter) addCompletedPart(partNum *int64, eTag *string) { +func (w *s3ChunkWriter) addCompletedPart(partNum *int32, eTag *string) { w.completedPartsMu.Lock() defer w.completedPartsMu.Unlock() - w.completedParts = append(w.completedParts, &s3.CompletedPart{ + w.completedParts = append(w.completedParts, types.CompletedPart{ PartNumber: partNum, ETag: eTag, }) @@ -5898,7 +5865,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:]) // S3 requires 1 <= PartNumber <= 10000 - s3PartNumber := aws.Int64(int64(chunkNumber + 1)) + s3PartNumber := aws.Int32(int32(chunkNumber + 1)) uploadPartReq := &s3.UploadPartInput{ Body: reader, Bucket: w.bucket, @@ -5919,7 +5886,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader if err != nil { return false, err } - uout, err = w.f.c.UploadPartWithContext(ctx, uploadPartReq) + uout, err = w.f.c.UploadPart(ctx, uploadPartReq) if err != nil { if chunkNumber <= 8 { return w.f.shouldRetry(ctx, err) @@ -5942,7 +5909,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader // Abort the multipart upload func (w *s3ChunkWriter) Abort(ctx context.Context) error { err := w.f.pacer.Call(func() (bool, error) { - _, err := w.f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{ + _, err := w.f.c.AbortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ Bucket: w.bucket, Key: w.key, UploadId: w.uploadID, @@ -5965,10 +5932,10 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) { }) var resp *s3.CompleteMultipartUploadOutput err = w.f.pacer.Call(func() (bool, error) { - resp, err = w.f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{ + resp, err = w.f.c.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ Bucket: w.bucket, Key: w.key, - MultipartUpload: &s3.CompletedMultipartUpload{ + MultipartUpload: &types.CompletedMultipartUpload{ Parts: w.completedParts, }, RequestPayer: w.multiPartUploadInput.RequestPayer, @@ -6010,62 +5977,31 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R return wantETag, gotETag, versionID, s3cw.ui, nil }
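As an aside, purely for illustration and not part of this patch (`client`, `uploadID`, bucket and key names are all assumed), the SDK v2 shape of completing a multipart upload that the chunk writer above builds towards is roughly:

    // Sketch only: SDK v2 takes value-typed CompletedPart entries and
    // *int32 part numbers when completing a multipart upload.
    parts := []types.CompletedPart{
        {PartNumber: aws.Int32(1), ETag: aws.String("etag-1")}, // assumed ETags
        {PartNumber: aws.Int32(2), ETag: aws.String("etag-2")},
    }
    _, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
        Bucket:          aws.String("example-bucket"), // assumed name
        Key:             aws.String("example-key"),    // assumed name
        UploadId:        uploadID,
        MultipartUpload: &types.CompletedMultipartUpload{Parts: parts},
    })

-// unWrapAwsError unwraps AWS errors, looking for a non AWS error -// -// It returns true if one was found and the error, or false and the -// error passed in.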
-func unWrapAwsError(err error) (found bool, outErr error) { - if awsErr, ok := err.(awserr.Error); ok { - var origErrs []error - if batchErr, ok := awsErr.(awserr.BatchedErrors); ok { - origErrs = batchErr.OrigErrs() - } else { - origErrs = []error{awsErr.OrigErr()} - } - for _, origErr := range origErrs { - found, newErr := unWrapAwsError(origErr) - if found { - return found, newErr - } - } - return false, err - } - return true, err -} - // Upload a single part using PutObject func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, lastModified time.Time, versionID *string, err error) { - r, resp := o.fs.c.PutObjectRequest(req) - if req.ContentLength != nil && *req.ContentLength == 0 { - // Can't upload zero length files like this for some reason - r.Body = bytes.NewReader([]byte{}) - } else { - r.SetStreamingBody(io.NopCloser(in)) + req.Body = io.NopCloser(in) + var options = []func(*s3.Options){} + if o.fs.opt.UseUnsignedPayload.Value { + options = append(options, s3.WithAPIOptions( + // avoids operation error S3: PutObject, failed to compute payload hash: failed to seek body to start, request stream is not seekable + v4signer.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware, + )) - r.SetContext(ctx) - r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD") - + } + // Can't retry single part uploads as we only have an io.Reader + options = append(options, func(s3opt *s3.Options) { + s3opt.RetryMaxAttempts = 1 + }) + var resp *s3.PutObjectOutput err = o.fs.pacer.CallNoRetry(func() (bool, error) { - err := r.Send() + resp, err = o.fs.c.PutObject(ctx, req, options...) return o.fs.shouldRetry(ctx, err) }) if err != nil { - // Return the underlying error if we have a - // Serialization or RequestError error if possible - // - // These errors are synthesized locally in the SDK - // (not returned from the server) and we'd rather have - // the underlying error if there is one. - if do, ok := err.(awserr.Error); ok && (do.Code() == request.ErrCodeSerialization || do.Code() == request.ErrCodeRequestError) { - if found, newErr := unWrapAwsError(err); found { - err = newErr - } - } return etag, lastModified, nil, err } lastModified = time.Now() if resp != nil { - etag = aws.StringValue(resp.ETag) + etag = deref(resp.ETag) versionID = resp.VersionId } return etag, lastModified, versionID, nil }
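For illustration only (again not part of the patch; `client` and `reader` are assumed), the unsigned-payload swap used above boils down to:

    // Sketch only: stream a non-seekable body to PutObject by signing the
    // request with UNSIGNED-PAYLOAD instead of hashing the body.
    _, err := client.PutObject(ctx, &s3.PutObjectInput{
        Bucket: aws.String("example-bucket"), // assumed name
        Key:    aws.String("example-key"),    // assumed name
        Body:   io.NopCloser(reader),         // not seekable, so it can't be hashed
    }, s3.WithAPIOptions(v4signer.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware))

@@ -6073,35 +6009,25 @@ func (o *Object) uploadSinglepartPutObjec // Upload a single part using a presigned request func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, lastModified time.Time, versionID *string, err error) { - // Create the request - putObj, _ := o.fs.c.PutObjectRequest(req) - - // Sign it so we can upload using a presigned request. - // - // Note the SDK didn't used to support streaming to - // PutObject so we used this work-around.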
- url, headers, err := putObj.PresignRequest(15 * time.Minute) + // Create the presigned request + putReq, err := s3.NewPresignClient(o.fs.c).PresignPutObject(ctx, req, s3.WithPresignExpires(15*time.Minute)) if err != nil { return etag, lastModified, nil, fmt.Errorf("s3 upload: sign request: %w", err) } - if o.fs.opt.V2Auth && headers == nil { - headers = putObj.HTTPRequest.Header - } - // Set request to nil if empty so as not to make chunked encoding if size == 0 { in = nil } // create the vanilla http request - httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in) + httpReq, err := http.NewRequestWithContext(ctx, "PUT", putReq.URL, in) if err != nil { return etag, lastModified, nil, fmt.Errorf("s3 upload: new request: %w", err) } // set the headers we signed and the length - httpReq.Header = headers + httpReq.Header = putReq.SignedHeader httpReq.ContentLength = size var resp *http.Response @@ -6159,7 +6085,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ ui.req = &s3.PutObjectInput{ Bucket: &bucket, - ACL: stringPointerOrNil(o.fs.opt.ACL), + ACL: types.ObjectCannedACL(o.fs.opt.ACL), Key: &bucketPath, } @@ -6168,13 +6094,13 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ if err != nil { return ui, fmt.Errorf("failed to read metadata from source object: %w", err) } - ui.req.Metadata = make(map[string]*string, len(meta)+2) + ui.req.Metadata = make(map[string]string, len(meta)+2) // merge metadata into request and user metadata for k, v := range meta { pv := aws.String(v) k = strings.ToLower(k) if o.fs.opt.NoSystemMetadata { - ui.req.Metadata[k] = pv + ui.req.Metadata[k] = v continue } switch k { @@ -6202,14 +6128,14 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ } case "btime": // write as metadata since we can't set it - ui.req.Metadata[k] = pv + ui.req.Metadata[k] = v default: - ui.req.Metadata[k] = pv + ui.req.Metadata[k] = v } } // Set the mtime in the meta data - ui.req.Metadata[metaMtime] = aws.String(swift.TimeToFloatString(modTime)) + ui.req.Metadata[metaMtime] = swift.TimeToFloatString(modTime) // read the md5sum if available // - for non multipart @@ -6231,7 +6157,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ // - a multipart upload // - the Etag is not an MD5, eg when using SSE/SSE-C // provided checksums aren't disabled - ui.req.Metadata[metaMD5Hash] = &md5sumBase64 + ui.req.Metadata[metaMD5Hash] = md5sumBase64 } } } @@ -6248,10 +6174,10 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ ui.req.ContentMD5 = &md5sumBase64 } if o.fs.opt.RequesterPays { - ui.req.RequestPayer = aws.String(s3.RequestPayerRequester) + ui.req.RequestPayer = types.RequestPayerRequester } if o.fs.opt.ServerSideEncryption != "" { - ui.req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption + ui.req.ServerSideEncryption = types.ServerSideEncryption(o.fs.opt.ServerSideEncryption) } if o.fs.opt.SSECustomerAlgorithm != "" { ui.req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm @@ -6266,7 +6192,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ ui.req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID } if o.fs.opt.StorageClass != "" { - ui.req.StorageClass = &o.fs.opt.StorageClass + ui.req.StorageClass = types.StorageClass(o.fs.opt.StorageClass) } // Apply upload options for _, option := range options { @@ -6291,7 +6217,7 @@ func (o *Object) prepareUpload(ctx context.Context, src 
fs.ObjectInfo, options [ const amzMetaPrefix = "x-amz-meta-" if strings.HasPrefix(lowerKey, amzMetaPrefix) { metaKey := lowerKey[len(amzMetaPrefix):] - ui.req.Metadata[metaKey] = aws.String(value) + ui.req.Metadata[metaKey] = value } else { fs.Errorf(o, "Don't know how to set key %q on upload", key) } @@ -6303,11 +6229,8 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ if !httpguts.ValidHeaderFieldName(key) { fs.Errorf(o, "Dropping invalid metadata key %q", key) delete(ui.req.Metadata, key) - } else if value == nil { - fs.Errorf(o, "Dropping nil metadata value for key %q", key) - delete(ui.req.Metadata, key) - } else if !httpguts.ValidHeaderFieldValue(*value) { - fs.Errorf(o, "Dropping invalid metadata value %q for key %q", *value, key) + } else if !httpguts.ValidHeaderFieldValue(value) { + fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key) delete(ui.req.Metadata, key) } } @@ -6405,10 +6328,10 @@ func (o *Object) Remove(ctx context.Context) error { VersionId: o.versionID, } if o.fs.opt.RequesterPays { - req.RequestPayer = aws.String(s3.RequestPayerRequester) + req.RequestPayer = types.RequestPayerRequester } err := o.fs.pacer.Call(func() (bool, error) { - _, err := o.fs.c.DeleteObjectWithContext(ctx, &req) + _, err := o.fs.c.DeleteObject(ctx, &req) return o.fs.shouldRetry(ctx, err) }) return err @@ -6430,8 +6353,8 @@ func (o *Object) SetTier(tier string) (err error) { tier = strings.ToUpper(tier) bucket, bucketPath := o.split() req := s3.CopyObjectInput{ - MetadataDirective: aws.String(s3.MetadataDirectiveCopy), - StorageClass: aws.String(tier), + MetadataDirective: types.MetadataDirectiveCopy, + StorageClass: types.StorageClass(tier), } err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o) if err != nil { diff --git a/backend/s3/s3_internal_test.go b/backend/s3/s3_internal_test.go index 5ac299d2a..5b68fdb08 100644 --- a/backend/s3/s3_internal_test.go +++ b/backend/s3/s3_internal_test.go @@ -5,15 +5,17 @@ import ( "compress/gzip" "context" "crypto/md5" + "errors" "fmt" "path" "strings" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/hash" @@ -131,20 +133,20 @@ func TestVersionLess(t *testing.T) { t1 := fstest.Time("2022-01-21T12:00:00+01:00") t2 := fstest.Time("2022-01-21T12:00:01+01:00") for n, test := range []struct { - a, b *s3.ObjectVersion + a, b *types.ObjectVersion want bool }{ {a: nil, b: nil, want: true}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false}, - {a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true}, - {a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, - {a: 
&s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, - {a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false}, + {a: nil, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t2}, want: false}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t2}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key2, LastModified: &t1}, want: true}, + {a: &types.ObjectVersion{Key: &key2, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true}, + {a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false}, } { got := versionLess(test.a, test.b) assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test)) @@ -157,24 +159,24 @@ func TestMergeDeleteMarkers(t *testing.T) { t1 := fstest.Time("2022-01-21T12:00:00+01:00") t2 := fstest.Time("2022-01-21T12:00:01+01:00") for n, test := range []struct { - versions []*s3.ObjectVersion - markers []*s3.DeleteMarkerEntry - want []*s3.ObjectVersion + versions []types.ObjectVersion + markers []types.DeleteMarkerEntry + want []types.ObjectVersion }{ { - versions: []*s3.ObjectVersion{}, - markers: []*s3.DeleteMarkerEntry{}, - want: []*s3.ObjectVersion{}, + versions: []types.ObjectVersion{}, + markers: []types.DeleteMarkerEntry{}, + want: []types.ObjectVersion{}, }, { - versions: []*s3.ObjectVersion{ + versions: []types.ObjectVersion{ { Key: &key1, LastModified: &t1, }, }, - markers: []*s3.DeleteMarkerEntry{}, - want: []*s3.ObjectVersion{ + markers: []types.DeleteMarkerEntry{}, + want: []types.ObjectVersion{ { Key: &key1, LastModified: &t1, @@ -182,14 +184,14 @@ func TestMergeDeleteMarkers(t *testing.T) { }, }, { - versions: []*s3.ObjectVersion{}, - markers: []*s3.DeleteMarkerEntry{ + versions: []types.ObjectVersion{}, + markers: []types.DeleteMarkerEntry{ { Key: &key1, LastModified: &t1, }, }, - want: []*s3.ObjectVersion{ + want: []types.ObjectVersion{ { Key: &key1, LastModified: &t1, @@ -198,7 +200,7 @@ func TestMergeDeleteMarkers(t *testing.T) { }, }, { - versions: []*s3.ObjectVersion{ + versions: []types.ObjectVersion{ { Key: &key1, LastModified: &t2, @@ -208,13 +210,13 @@ func TestMergeDeleteMarkers(t *testing.T) { LastModified: &t2, }, }, - markers: []*s3.DeleteMarkerEntry{ + markers: []types.DeleteMarkerEntry{ { Key: &key1, LastModified: &t1, }, }, - want: []*s3.ObjectVersion{ + want: []types.ObjectVersion{ { Key: &key1, 
LastModified: &t2, @@ -399,22 +401,23 @@ func (f *Fs) InternalTestVersions(t *testing.T) { // quirk is set correctly req := s3.CreateBucketInput{ Bucket: &f.rootBucket, - ACL: stringPointerOrNil(f.opt.BucketACL), + ACL: types.BucketCannedACL(f.opt.BucketACL), } if f.opt.LocationConstraint != "" { - req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ - LocationConstraint: &f.opt.LocationConstraint, + req.CreateBucketConfiguration = &types.CreateBucketConfiguration{ + LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint), } } err := f.pacer.Call(func() (bool, error) { - _, err := f.c.CreateBucketWithContext(ctx, &req) + _, err := f.c.CreateBucket(ctx, &req) return f.shouldRetry(ctx, err) }) var errString string + var awsError smithy.APIError if err == nil { errString = "No Error" - } else if awsErr, ok := err.(awserr.Error); ok { - errString = awsErr.Code() + } else if errors.As(err, &awsError) { + errString = awsError.ErrorCode() } else { assert.Fail(t, "Unknown error %T %v", err, err) } diff --git a/backend/s3/s3_test.go b/backend/s3/s3_test.go index 7e6e55360..e0499ba66 100644 --- a/backend/s3/s3_test.go +++ b/backend/s3/s3_test.go @@ -4,12 +4,14 @@ package s3 import ( "context" "net/http" - "strings" "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) { @@ -54,20 +56,16 @@ func TestAWSDualStackOption(t *testing.T) { // test enabled ctx, opt, client := SetupS3Test(t) opt.UseDualStack = true - s3Conn, _, _ := s3Connection(ctx, opt, client) - if !strings.Contains(s3Conn.Endpoint, "dualstack") { - t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint) - t.Fail() - } + s3Conn, err := s3Connection(ctx, opt, client) + require.NoError(t, err) + assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint) } { // test default case ctx, opt, client := SetupS3Test(t) - s3Conn, _, _ := s3Connection(ctx, opt, client) - if strings.Contains(s3Conn.Endpoint, "dualstack") { - t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint) - t.Fail() - } + s3Conn, err := s3Connection(ctx, opt, client) + require.NoError(t, err) + assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint) } } diff --git a/backend/s3/setfrom.go b/backend/s3/setfrom.go index 27eb2aafe..439e8b6b0 100644 --- a/backend/s3/setfrom.go +++ b/backend/s3/setfrom.go @@ -2,7 +2,10 @@ package s3 -import "github.com/aws/aws-sdk-go/service/s3" +import ( + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" +) // setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) { @@ -27,6 +30,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output a.Name = b.Name a.Prefix = b.Prefix a.RequestCharged = b.RequestCharged + a.ResultMetadata = b.ResultMetadata } // setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b @@ -41,8 +45,8 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers a.RequestPayer = b.RequestPayer } -// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching 
elements from a to b -func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) { +// setFrom_typesObjectVersion_typesDeleteMarkerEntry copies matching elements from a to b +func setFrom_typesObjectVersion_typesDeleteMarkerEntry(a *types.ObjectVersion, b *types.DeleteMarkerEntry) { a.IsLatest = b.IsLatest a.Key = b.Key a.LastModified = b.LastModified @@ -60,10 +64,11 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV a.Name = b.Name a.Prefix = b.Prefix a.RequestCharged = b.RequestCharged + a.ResultMetadata = b.ResultMetadata } -// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b -func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) { +// setFrom_typesObject_typesObjectVersion copies matching elements from a to b +func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) { a.ChecksumAlgorithm = b.ChecksumAlgorithm a.ETag = b.ETag a.Key = b.Key @@ -71,7 +76,6 @@ func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) { a.Owner = b.Owner a.RestoreStatus = b.RestoreStatus a.Size = b.Size - a.StorageClass = b.StorageClass } // setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b @@ -82,6 +86,7 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa a.ContentEncoding = b.ContentEncoding a.ContentLanguage = b.ContentLanguage a.ContentType = b.ContentType + a.Expires = b.Expires a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode @@ -96,8 +101,9 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa // setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) { - a.ACL = b.ACL a.Bucket = b.Bucket + a.Key = b.Key + a.ACL = b.ACL a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumAlgorithm = b.ChecksumAlgorithm @@ -111,7 +117,6 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar a.GrantRead = b.GrantRead a.GrantReadACP = b.GrantReadACP a.GrantWriteACP = b.GrantWriteACP - a.Key = b.Key a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode @@ -132,6 +137,7 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) { a.Bucket = b.Bucket a.CopySource = b.CopySource + a.Key = b.Key a.CopySourceIfMatch = b.CopySourceIfMatch a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch @@ -141,7 +147,6 @@ func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5 a.ExpectedBucketOwner = b.ExpectedBucketOwner a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner - a.Key = b.Key a.RequestPayer = b.RequestPayer a.SSECustomerAlgorithm = b.SSECustomerAlgorithm a.SSECustomerKey = b.SSECustomerKey @@ -166,6 +171,7 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3. 
a.ETag = b.ETag a.Expiration = b.Expiration a.Expires = b.Expires + a.ExpiresString = b.ExpiresString a.LastModified = b.LastModified a.Metadata = b.Metadata a.MissingMeta = b.MissingMeta @@ -183,12 +189,14 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3. a.StorageClass = b.StorageClass a.VersionId = b.VersionId a.WebsiteRedirectLocation = b.WebsiteRedirectLocation + a.ResultMetadata = b.ResultMetadata } // setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) { - a.ACL = b.ACL a.Bucket = b.Bucket + a.Key = b.Key + a.ACL = b.ACL a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumAlgorithm = b.ChecksumAlgorithm @@ -202,7 +210,6 @@ func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipart a.GrantRead = b.GrantRead a.GrantReadACP = b.GrantReadACP a.GrantWriteACP = b.GrantWriteACP - a.Key = b.Key a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode @@ -232,6 +239,7 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P a.ContentLanguage = b.ContentLanguage a.ContentLength = b.ContentLength a.ContentType = b.ContentType + a.Expires = b.Expires a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode @@ -246,8 +254,9 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P // setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) { - a.ACL = b.ACL a.Bucket = b.Bucket + a.Key = b.Key + a.ACL = b.ACL a.BucketKeyEnabled = b.BucketKeyEnabled a.CacheControl = b.CacheControl a.ChecksumAlgorithm = b.ChecksumAlgorithm @@ -261,7 +270,6 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put a.GrantRead = b.GrantRead a.GrantReadACP = b.GrantReadACP a.GrantWriteACP = b.GrantWriteACP - a.Key = b.Key a.Metadata = b.Metadata a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockMode = b.ObjectLockMode diff --git a/backend/s3/v2sign.go b/backend/s3/v2sign.go index 12e41f0b6..45b1bf7f3 100644 --- a/backend/s3/v2sign.go +++ b/backend/s3/v2sign.go @@ -3,6 +3,7 @@ package s3 import ( + "context" "crypto/hmac" "crypto/sha1" "encoding/base64" @@ -10,6 +11,9 @@ import ( "sort" "strings" "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4" ) // URL parameters that need to be added to the signature @@ -36,10 +40,17 @@ var s3ParamsToSign = map[string]struct{}{ "response-content-encoding": {}, } -// sign signs requests using v2 auth +// Implement HTTPSignerV4 interface +type v2Signer struct { + opt *Options +} + +// SignHTTP signs requests using v2 auth. // -// Cobbled together from goamz and aws-sdk-go -func sign(AccessKey, SecretKey string, req *http.Request) { +// Cobbled together from goamz and aws-sdk-go. 
+// +// Bodged up to compile with AWS SDK v2 +func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error { // Set date date := time.Now().UTC().Format(time.RFC1123) req.Header.Set("Date", date) @@ -107,11 +118,12 @@ func sign(AccessKey, SecretKey string, req *http.Request) { // Make signature payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri - hash := hmac.New(sha1.New, []byte(SecretKey)) + hash := hmac.New(sha1.New, []byte(v2.opt.SecretAccessKey)) _, _ = hash.Write([]byte(payload)) signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size())) base64.StdEncoding.Encode(signature, hash.Sum(nil)) // Set signature in request - req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature)) + req.Header.Set("Authorization", "AWS "+v2.opt.AccessKeyID+":"+string(signature)) + return nil } diff --git a/docs/content/s3.md b/docs/content/s3.md index c4ccbab89..7be465320 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -571,15 +571,21 @@ The different authentication methods are tried in this order: - Session Token: `AWS_SESSION_TOKEN` (optional) - Or, use a [named profile](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html): - Profile files are standard files used by AWS CLI tools - - By default it will use the profile in your home directory (e.g. `~/.aws/credentials` on unix based systems) file and the "default" profile, to change set these environment variables: - - `AWS_SHARED_CREDENTIALS_FILE` to control which file. - - `AWS_PROFILE` to control which profile to use. + - By default it will use the profile file in your home directory (e.g. `~/.aws/credentials` on unix based systems) and the "default" profile; to change this, set these environment variables or config keys: + - `AWS_SHARED_CREDENTIALS_FILE` (or the `shared_credentials_file` config key) to control which file. + - `AWS_PROFILE` (or the `profile` config key) to control which profile to use. - Or, run `rclone` in an ECS task with an IAM role (AWS only). - Or, run `rclone` on an EC2 instance with an IAM role (AWS only). - Or, run `rclone` in an EKS pod with an IAM role that is associated with a service account (AWS only). + - Or, use [process credentials](https://docs.aws.amazon.com/sdkref/latest/guide/feature-process-credentials.html) to read config from an external program. + +With `env_auth = true` rclone (which uses the SDK for Go v2) should support +[all authentication methods](https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html) +that the `aws` CLI tool and the other AWS SDKs do.
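For example (purely illustrative; `mys3` is an assumed remote configured with just `env_auth = true`), the credentials are picked up straight from the standard AWS environment:

    AWS_PROFILE=myprofile rclone lsd mys3:

If none of these options actually end up providing `rclone` with AWS -credentials then S3 interaction will be non-authenticated (see below). +credentials then S3 interaction will be non-authenticated (see the +[anonymous access](#anonymous-access) section for more info). ### S3 Permissions @@ -2171,7 +2177,7 @@ It doesn't return anything. {{< rem autogenerated options stop >}} -### Anonymous access to public buckets +### Anonymous access to public buckets {#anonymous-access} If you want to use rclone to access a public bucket, configure with a blank `access_key_id` and `secret_access_key`. Your config should end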
Your config should end @@ -2181,15 +2187,6 @@ up looking like this: [anons3] type = s3 provider = AWS -env_auth = false -access_key_id = -secret_access_key = -region = us-east-1 -endpoint = -location_constraint = -acl = private -server_side_encryption = -storage_class = ``` Then use it as normal with the name of the public bucket, e.g. @@ -2198,6 +2195,10 @@ Then use it as normal with the name of the public bucket, e.g. You will be able to list and copy data but not upload it. +You can also do this entirely on the command line + + rclone lsd :s3,provider=AWS:1000genomes + ## Providers ### AWS S3 diff --git a/go.mod b/go.mod index 7c1623df0..980d21f9a 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,12 @@ require ( github.com/anacrolix/log v0.15.2 github.com/atotto/clipboard v0.1.4 github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.27 + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 + github.com/aws/smithy-go v1.20.3 github.com/buengese/sgzip v0.1.1 github.com/cloudsoda/go-smb2 v0.0.0-20231124195312-f3ec8ae2c891 github.com/colinmarc/hdfs/v2 v2.4.0 @@ -99,6 +105,19 @@ require ( github.com/akavel/rsrc v0.10.2 // indirect github.com/anacrolix/generics v0.0.1 // indirect github.com/andybalholm/cascadia v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bradenaw/juniper v0.15.2 // indirect github.com/calebcase/tmpfile v1.0.3 // indirect diff --git a/go.sum b/go.sum index f0aaa8f62..59b3b310c 100644 --- a/go.sum +++ b/go.sum @@ -104,6 +104,44 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= 
+github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 h1:hT8ZAZRIfqBqHbzKTII+CIiY8G2oC9OpLedkZ51DWl8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradenaw/juniper v0.15.2 h1:0JdjBGEF2jP1pOxmlNIrPhAoQN7Ng5IMAY5D0PHMW4U=