s3: fix GCS provider after SDKv2 update #4989

This also adds GCS via S3 to the integration tester.
Nick Craig-Wood 2024-08-05 15:48:29 +01:00
parent fd9049c83d
commit 66ae050a8b
2 changed files with 93 additions and 0 deletions
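
The second hunk below adds a TestS3GCS remote to the integration test backends list. As a rough sketch (not part of this commit), such a remote's rclone config entry might look like the following, assuming GCS HMAC interoperability keys and the documented storage.googleapis.com endpoint; the real test remote's settings are not shown in the diff:

[TestS3GCS]
type = s3
provider = GCS
# placeholder HMAC interoperability key pair generated in the GCS console
access_key_id = GOOG1EXAMPLE
secret_access_key = EXAMPLESECRET
endpoint = https://storage.googleapis.com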


@@ -2931,6 +2931,76 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
	}
}
// Google Cloud Storage alters the Accept-Encoding header, which
// breaks the v2 request signature
//
// It also doesn't like the x-id URL parameter SDKv2 puts in so we
// remove that too.
//
// See https://github.com/aws/aws-sdk-go-v2/issues/1816.
// Adapted from: https://github.com/aws/aws-sdk-go-v2/issues/1816#issuecomment-1927281540
func fixupGCS(o *s3.Options) {
	type ignoredHeadersKey struct{}
	headers := []string{"Accept-Encoding"}
	fixup := middleware.FinalizeMiddlewareFunc(
		"FixupGCS",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
				return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
			}
			// Delete headers from being signed - will restore later
			ignored := make(map[string]string, len(headers))
			for _, h := range headers {
				ignored[h] = req.Header.Get(h)
				req.Header.Del(h)
			}
			// Remove x-id because Google doesn't like them
			if query := req.URL.Query(); query.Has("x-id") {
				query.Del("x-id")
				req.URL.RawQuery = query.Encode()
			}
			// Store ignored on context
			ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
			return next.HandleFinalize(ctx, in)
		},
	)
	// Restore headers if necessary
	restore := middleware.FinalizeMiddlewareFunc(
		"FixupGCSRestoreHeaders",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
				return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
			}
			// Restore ignored from ctx
			ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
			for k, v := range ignored {
				req.Header.Set(k, v)
			}
			return next.HandleFinalize(ctx, in)
		},
	)
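	// The fixup middleware is inserted just before the "Signing" finalize step,
	// so the ignored headers and the x-id parameter never enter the signature;
	// restore is inserted just after it, so the headers still go out on the wire.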
	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
		if err := stack.Finalize.Insert(fixup, "Signing", middleware.Before); err != nil {
			return err
		}
		if err := stack.Finalize.Insert(restore, "Signing", middleware.After); err != nil {
			return err
		}
		return nil
	})
}
// s3Connection makes a connection to s3
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
	ci := fs.GetConfig(ctx)
@@ -3017,6 +3087,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
		})
	}
	if opt.Provider == "GCS" {
		options = append(options, func(o *s3.Options) {
			fixupGCS(o)
		})
	}
	c := s3.NewFromConfig(awsConfig, options...)
	return c, nil
}
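
For reference, a minimal sketch (not part of the commit) of building a GCS-pointed client with this fix applied, assuming it sits in the same package as fixupGCS above. The region, endpoint and static-credential wiring here are illustrative assumptions; rclone's actual construction path goes through s3.NewFromConfig with option functions, as shown above.

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newGCSClient is a hypothetical helper showing fixupGCS applied as a
// client option function.
func newGCSClient(accessKey, secretKey string) *s3.Client {
	return s3.New(s3.Options{
		// Assumed values: GCS's XML API endpoint and a nominal region.
		Region:       "us-east-1",
		BaseEndpoint: aws.String("https://storage.googleapis.com"),
		// GCS HMAC interoperability keys stand in for AWS keys.
		Credentials: credentials.NewStaticCredentialsProvider(accessKey, secretKey, ""),
	}, fixupGCS)
}

Requests made with such a client then pass through the two Finalize middlewares that fixupGCS installs around the Signing step.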


@@ -215,6 +215,23 @@ backends:
      - TestIntegration/FsMkdir/FsEncoding/leading_HT
      - TestIntegration/FsMkdir/FsEncoding/leading_VT
      - TestIntegration/FsMkdir/FsPutFiles/FsPutStream/0
  - backend: "s3"
    remote: "TestS3GCS:"
    fastlist: true
    ignore:
      - TestIntegration/FsMkdir/FsEncoding/control_chars
      - TestIntegration/FsMkdir/FsEncoding/leading_CR
      - TestIntegration/FsMkdir/FsEncoding/leading_LF
      - TestIntegration/FsMkdir/FsEncoding/trailing_CR
      - TestIntegration/FsMkdir/FsEncoding/trailing_LF
      - TestIntegration/FsMkdir/FsPutFiles/PublicLink
      - TestIntegration/FsMkdir/FsPutFiles/SetTier
      - TestIntegration/FsMkdir/FsPutFiles/Internal/Metadata/GzipEncoding
      - TestIntegration/FsMkdir/FsPutFiles/Internal/Versions/VersionAt/AfterDelete/List
      - TestIntegration/FsMkdir/FsPutFiles/Internal/Versions/VersionAt/AfterDelete/NewObject
      - TestIntegration/FsMkdir/FsPutFiles/Internal/Versions/VersionAt/AfterTwo/List
      - TestIntegration/FsMkdir/FsPutFiles/Internal/Versions/VersionAt/AfterTwo/NewObject
      - TestBisyncRemoteRemote/extended_filenames
# Disabled due to excessive rate limiting at DO which cause the tests never to pass
# This hits the rate limit as documented here: https://www.digitalocean.com/docs/spaces/#limits
# 2 COPYs per 5 minutes on any individual object in a Space