mirror of https://github.com/rclone/rclone.git, synced 2024-11-26 10:13:52 +08:00

s3: refactor to use generated code instead of reflection to copy structs

This commit is contained in:
parent 4344a3e2ea
commit 4b981100db
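For context on what the refactor changes (illustration only, not part of the commit): rclone previously copied near-identical SDK structs with a generic, reflection-driven helper (lib/structs.SetFrom); this commit generates per-type setters that do plain field assignments instead. Below is a minimal sketch of the two styles, using hypothetical reqV1/reqV2 types and a reflectSetFrom that only approximates what the reflection helper does:

package main

import (
	"fmt"
	"reflect"
)

// Hypothetical stand-ins for two near-identical SDK request structs.
type reqV1 struct {
	Bucket, Prefix *string
}

type reqV2 struct {
	Bucket, Prefix *string
	FetchOwner     *bool // extra field with no counterpart in reqV1
}

// reflectSetFrom approximates the reflection-based copy being replaced:
// walk b's fields and copy any field that a also has with an assignable type.
func reflectSetFrom(a, b interface{}) {
	va := reflect.ValueOf(a).Elem()
	vb := reflect.ValueOf(b).Elem()
	tb := vb.Type()
	for i := 0; i < tb.NumField(); i++ {
		af := va.FieldByName(tb.Field(i).Name)
		if af.IsValid() && af.CanSet() && vb.Field(i).Type().AssignableTo(af.Type()) {
			af.Set(vb.Field(i))
		}
	}
}

// setFrom_reqV1_reqV2 is the shape of the generated replacement:
// plain assignments, no reflection at run time.
func setFrom_reqV1_reqV2(a *reqV1, b *reqV2) {
	a.Bucket = b.Bucket
	a.Prefix = b.Prefix
}

func main() {
	bucket, prefix := "bucket", "path/"
	src := &reqV2{Bucket: &bucket, Prefix: &prefix}

	var viaReflect, viaGenerated reqV1
	reflectSetFrom(&viaReflect, src)
	setFrom_reqV1_reqV2(&viaGenerated, src)

	// Both approaches copy the matching fields; the generated form just
	// does it without reflect, which is what the S3 backend switches to.
	fmt.Println(*viaReflect.Bucket == *viaGenerated.Bucket) // true
}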
backend/s3/gen_setfrom.go (new file, 101 lines)

@@ -0,0 +1,101 @@
// Generate boilerplate code for setting similar structs from each other

//go:build ignore
// +build ignore

package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"reflect"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3"
)

// flags
var (
	outputFile = flag.String("o", "", "Output file name, stdout if unset")
)

// globals
var (
	out io.Writer = os.Stdout
)

// genSetFrom generates code to set the public members of a from b
//
// a and b should be pointers to structs
//
// a can be a different type from b
//
// Only the Fields which have the same name and assignable type on a
// and b will be set.
//
// This is useful for copying between almost identical structures that
// are frequently present in auto-generated code for cloud storage
// interfaces.
func genSetFrom(a, b interface{}) {
	name := fmt.Sprintf("setFrom_%T_%T", a, b)
	name = strings.Replace(name, ".", "", -1)
	name = strings.Replace(name, "*", "", -1)
	fmt.Fprintf(out, "\n// %s copies matching elements from a to b\n", name)
	fmt.Fprintf(out, "func %s(a %T, b %T) {\n", name, a, b)
	ta := reflect.TypeOf(a).Elem()
	tb := reflect.TypeOf(b).Elem()
	va := reflect.ValueOf(a).Elem()
	vb := reflect.ValueOf(b).Elem()
	for i := 0; i < tb.NumField(); i++ {
		bField := vb.Field(i)
		tbField := tb.Field(i)
		name := tbField.Name
		aField := va.FieldByName(name)
		taField, found := ta.FieldByName(name)
		if found && aField.IsValid() && bField.IsValid() && aField.CanSet() && tbField.Type.AssignableTo(taField.Type) {
			fmt.Fprintf(out, "\ta.%s = b.%s\n", name, name)
		}
	}
	fmt.Fprintf(out, "}\n")
}

func main() {
	flag.Parse()

	if *outputFile != "" {
		fd, err := os.Create(*outputFile)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			err := fd.Close()
			if err != nil {
				log.Fatal(err)
			}
		}()
		out = fd
	}

	fmt.Fprintf(out, `// Code generated by "go run gen_setfrom.go"; DO NOT EDIT.

package s3

import "github.com/aws/aws-sdk-go/service/s3"
`)

	genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
	genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
	genSetFrom(new(s3.Object), new(s3.ObjectVersion))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
	genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
	genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput))
	genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput))
}
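A note on the generated function names seen in setfrom.go further down: %T on the two pointers produces strings like "*s3.ListObjectsInput", and stripping "." and "*" yields names such as setFrom_s3ListObjectsInput_s3ListObjectsV2Input. Here is the naming logic from genSetFrom, isolated into a small runnable check (assuming the aws-sdk-go dependency is available on the module path):

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	a, b := new(s3.ListObjectsInput), new(s3.ListObjectsV2Input)
	// The same three lines genSetFrom uses to build the function name.
	name := fmt.Sprintf("setFrom_%T_%T", a, b)
	name = strings.Replace(name, ".", "", -1)
	name = strings.Replace(name, "*", "", -1)
	fmt.Println(name) // setFrom_s3ListObjectsInput_s3ListObjectsV2Input
}

With the //go:generate directive added in the next diff, running go generate in backend/s3 re-runs this generator and rewrites setfrom.go.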
backend/s3/s3.go (modified)

@@ -1,6 +1,8 @@
 // Package s3 provides an interface to Amazon S3 oject storage
 package s3
 
+//go:generate go run gen_setfrom.go -o setfrom.go
+
 import (
 	"bytes"
 	"context"
@@ -54,7 +56,6 @@ import (
 	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
-	"github.com/rclone/rclone/lib/structs"
 	"github.com/rclone/rclone/lib/version"
 	"golang.org/x/sync/errgroup"
 )
@@ -2866,7 +2867,8 @@ func (f *Fs) newV1List(req *s3.ListObjectsV2Input) bucketLister {
 		f: f,
 	}
 	// Convert v2 req into v1 req
-	structs.SetFrom(&l.req, req)
+	//structs.SetFrom(&l.req, req)
+	setFrom_s3ListObjectsInput_s3ListObjectsV2Input(&l.req, req)
 	return l
 }
 
@@ -2898,7 +2900,8 @@ func (ls *v1List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi
 
 	// convert v1 resp into v2 resp
 	resp = new(s3.ListObjectsV2Output)
-	structs.SetFrom(resp, respv1)
+	//structs.SetFrom(resp, respv1)
+	setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(resp, respv1)
 
 	return resp, nil, nil
 }
@@ -2961,7 +2964,8 @@ func (f *Fs) newVersionsList(req *s3.ListObjectsV2Input, hidden bool, versionAt
 		hidden: hidden,
 	}
 	// Convert v2 req into withVersions req
-	structs.SetFrom(&l.req, req)
+	//structs.SetFrom(&l.req, req)
+	setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(&l.req, req)
 	return l
 }
 
@@ -3011,7 +3015,8 @@ func mergeDeleteMarkers(oldVersions []*s3.ObjectVersion, deleteMarkers []*s3.Del
 	newVersions = make([]*s3.ObjectVersion, 0, len(oldVersions)+len(deleteMarkers))
 	for _, deleteMarker := range deleteMarkers {
 		var obj = new(s3.ObjectVersion)
-		structs.SetFrom(obj, deleteMarker)
+		//structs.SetFrom(obj, deleteMarker)
+		setFrom_s3ObjectVersion_s3DeleteMarkerEntry(obj, deleteMarker)
 		obj.Size = isDeleteMarker
 		for len(oldVersions) > 0 && versionLess(oldVersions[0], obj) {
 			newVersions = append(newVersions, oldVersions[0])
@@ -3045,7 +3050,8 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output,
 
 	// convert Versions resp into v2 resp
 	resp = new(s3.ListObjectsV2Output)
-	structs.SetFrom(resp, respVersions)
+	//structs.SetFrom(resp, respVersions)
+	setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(resp, respVersions)
 
 	// Merge in delete Markers as s3.ObjectVersion if we need them
 	if ls.hidden || ls.usingVersionAt {
@@ -3074,7 +3080,8 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output,
 			continue
 		}
 		var obj = new(s3.Object)
-		structs.SetFrom(obj, objVersion)
+		//structs.SetFrom(obj, objVersion)
+		setFrom_s3Object_s3ObjectVersion(obj, objVersion)
 		// Adjust the file names
 		if !ls.usingVersionAt && !aws.BoolValue(objVersion.IsLatest) {
 			if obj.Key != nil && objVersion.LastModified != nil {
@@ -3611,7 +3618,8 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 	req := &s3.CreateMultipartUploadInput{}
 
 	// Fill in the request from the head info
-	structs.SetFrom(req, info)
+	//structs.SetFrom(req, info)
+	setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(req, info)
 
 	// If copy metadata was set then set the Metadata to that read
 	// from the head request
@@ -3620,7 +3628,8 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 	}
 
 	// Overwrite any from the copyReq
-	structs.SetFrom(req, copyReq)
+	//structs.SetFrom(req, copyReq)
+	setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(req, copyReq)
 
 	req.Bucket = &dstBucket
 	req.Key = &dstPath
@@ -3660,7 +3669,8 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 		if err := f.pacer.Call(func() (bool, error) {
 			partNum := partNum
 			uploadPartReq := &s3.UploadPartCopyInput{}
-			structs.SetFrom(uploadPartReq, copyReq)
+			//structs.SetFrom(uploadPartReq, copyReq)
+			setFrom_s3UploadPartCopyInput_s3CopyObjectInput(uploadPartReq, copyReq)
 			uploadPartReq.Bucket = &dstBucket
 			uploadPartReq.Key = &dstPath
 			uploadPartReq.PartNumber = &partNum
@@ -4632,7 +4642,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		}
 	}
 	var head s3.HeadObjectOutput
-	structs.SetFrom(&head, resp)
+	//structs.SetFrom(&head, resp)
+	setFrom_s3HeadObjectOutput_s3GetObjectOutput(&head, resp)
 	head.ContentLength = size
 	o.setMetaData(&head)
 	return resp.Body, nil
@@ -4675,7 +4686,8 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 	memPool := f.getMemoryPool(int64(partSize))
 
 	var mReq s3.CreateMultipartUploadInput
-	structs.SetFrom(&mReq, req)
+	//structs.SetFrom(&mReq, req)
+	setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(&mReq, req)
 	var cout *s3.CreateMultipartUploadOutput
 	err = f.pacer.Call(func() (bool, error) {
 		var err error
@@ -5144,7 +5156,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// uploaded properly. If size < 0 then we need to do the HEAD.
 	if o.fs.opt.NoHead && size >= 0 {
 		var head s3.HeadObjectOutput
-		structs.SetFrom(&head, &req)
+		//structs.SetFrom(&head, &req)
+		setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
 		head.ETag = &md5sumHex // doesn't matter quotes are misssing
 		head.ContentLength = &size
 		// If we have done a single part PUT request then we can read these
backend/s3/setfrom.go (new file, 239 lines)

@@ -0,0 +1,239 @@
// Code generated by "go run gen_setfrom.go"; DO NOT EDIT.

package s3

import "github.com/aws/aws-sdk-go/service/s3"

// setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {
	a.Bucket = b.Bucket
	a.Delimiter = b.Delimiter
	a.EncodingType = b.EncodingType
	a.ExpectedBucketOwner = b.ExpectedBucketOwner
	a.MaxKeys = b.MaxKeys
	a.Prefix = b.Prefix
	a.RequestPayer = b.RequestPayer
}

// setFrom_s3ListObjectsV2Output_s3ListObjectsOutput copies matching elements from a to b
func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectsOutput) {
	a.CommonPrefixes = b.CommonPrefixes
	a.Contents = b.Contents
	a.Delimiter = b.Delimiter
	a.EncodingType = b.EncodingType
	a.IsTruncated = b.IsTruncated
	a.MaxKeys = b.MaxKeys
	a.Name = b.Name
	a.Prefix = b.Prefix
}

// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVersionsInput, b *s3.ListObjectsV2Input) {
	a.Bucket = b.Bucket
	a.Delimiter = b.Delimiter
	a.EncodingType = b.EncodingType
	a.ExpectedBucketOwner = b.ExpectedBucketOwner
	a.MaxKeys = b.MaxKeys
	a.Prefix = b.Prefix
}

// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
	a.IsLatest = b.IsLatest
	a.Key = b.Key
	a.LastModified = b.LastModified
	a.Owner = b.Owner
	a.VersionId = b.VersionId
}

// setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput copies matching elements from a to b
func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectVersionsOutput) {
	a.CommonPrefixes = b.CommonPrefixes
	a.Delimiter = b.Delimiter
	a.EncodingType = b.EncodingType
	a.IsTruncated = b.IsTruncated
	a.MaxKeys = b.MaxKeys
	a.Name = b.Name
	a.Prefix = b.Prefix
}

// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
	a.ChecksumAlgorithm = b.ChecksumAlgorithm
	a.ETag = b.ETag
	a.Key = b.Key
	a.LastModified = b.LastModified
	a.Owner = b.Owner
	a.Size = b.Size
	a.StorageClass = b.StorageClass
}

// setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b
func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) {
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ContentDisposition = b.ContentDisposition
	a.ContentEncoding = b.ContentEncoding
	a.ContentLanguage = b.ContentLanguage
	a.ContentType = b.ContentType
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode
	a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
	a.SSEKMSKeyId = b.SSEKMSKeyId
	a.ServerSideEncryption = b.ServerSideEncryption
	a.StorageClass = b.StorageClass
	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}

// setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
	a.ACL = b.ACL
	a.Bucket = b.Bucket
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumAlgorithm = b.ChecksumAlgorithm
	a.ContentDisposition = b.ContentDisposition
	a.ContentEncoding = b.ContentEncoding
	a.ContentLanguage = b.ContentLanguage
	a.ContentType = b.ContentType
	a.ExpectedBucketOwner = b.ExpectedBucketOwner
	a.Expires = b.Expires
	a.GrantFullControl = b.GrantFullControl
	a.GrantRead = b.GrantRead
	a.GrantReadACP = b.GrantReadACP
	a.GrantWriteACP = b.GrantWriteACP
	a.Key = b.Key
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode
	a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
	a.RequestPayer = b.RequestPayer
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKey = b.SSECustomerKey
	a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
	a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
	a.SSEKMSKeyId = b.SSEKMSKeyId
	a.ServerSideEncryption = b.ServerSideEncryption
	a.StorageClass = b.StorageClass
	a.Tagging = b.Tagging
	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}

// setFrom_s3UploadPartCopyInput_s3CopyObjectInput copies matching elements from a to b
func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
	a.Bucket = b.Bucket
	a.CopySource = b.CopySource
	a.CopySourceIfMatch = b.CopySourceIfMatch
	a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
	a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch
	a.CopySourceIfUnmodifiedSince = b.CopySourceIfUnmodifiedSince
	a.CopySourceSSECustomerAlgorithm = b.CopySourceSSECustomerAlgorithm
	a.CopySourceSSECustomerKey = b.CopySourceSSECustomerKey
	a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
	a.ExpectedBucketOwner = b.ExpectedBucketOwner
	a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
	a.Key = b.Key
	a.RequestPayer = b.RequestPayer
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKey = b.SSECustomerKey
	a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
}

// setFrom_s3HeadObjectOutput_s3GetObjectOutput copies matching elements from a to b
func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.GetObjectOutput) {
	a.AcceptRanges = b.AcceptRanges
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumCRC32 = b.ChecksumCRC32
	a.ChecksumCRC32C = b.ChecksumCRC32C
	a.ChecksumSHA1 = b.ChecksumSHA1
	a.ChecksumSHA256 = b.ChecksumSHA256
	a.ContentDisposition = b.ContentDisposition
	a.ContentEncoding = b.ContentEncoding
	a.ContentLanguage = b.ContentLanguage
	a.ContentLength = b.ContentLength
	a.ContentType = b.ContentType
	a.DeleteMarker = b.DeleteMarker
	a.ETag = b.ETag
	a.Expiration = b.Expiration
	a.Expires = b.Expires
	a.LastModified = b.LastModified
	a.Metadata = b.Metadata
	a.MissingMeta = b.MissingMeta
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode
	a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
	a.PartsCount = b.PartsCount
	a.ReplicationStatus = b.ReplicationStatus
	a.RequestCharged = b.RequestCharged
	a.Restore = b.Restore
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
	a.SSEKMSKeyId = b.SSEKMSKeyId
	a.ServerSideEncryption = b.ServerSideEncryption
	a.StorageClass = b.StorageClass
	a.VersionId = b.VersionId
	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}

// setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
	a.ACL = b.ACL
	a.Bucket = b.Bucket
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumAlgorithm = b.ChecksumAlgorithm
	a.ContentDisposition = b.ContentDisposition
	a.ContentEncoding = b.ContentEncoding
	a.ContentLanguage = b.ContentLanguage
	a.ContentType = b.ContentType
	a.ExpectedBucketOwner = b.ExpectedBucketOwner
	a.Expires = b.Expires
	a.GrantFullControl = b.GrantFullControl
	a.GrantRead = b.GrantRead
	a.GrantReadACP = b.GrantReadACP
	a.GrantWriteACP = b.GrantWriteACP
	a.Key = b.Key
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode
	a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
	a.RequestPayer = b.RequestPayer
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKey = b.SSECustomerKey
	a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
	a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
	a.SSEKMSKeyId = b.SSEKMSKeyId
	a.ServerSideEncryption = b.ServerSideEncryption
	a.StorageClass = b.StorageClass
	a.Tagging = b.Tagging
	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}

// setFrom_s3HeadObjectOutput_s3PutObjectInput copies matching elements from a to b
func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.PutObjectInput) {
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumCRC32 = b.ChecksumCRC32
	a.ChecksumCRC32C = b.ChecksumCRC32C
	a.ChecksumSHA1 = b.ChecksumSHA1
	a.ChecksumSHA256 = b.ChecksumSHA256
	a.ContentDisposition = b.ContentDisposition
	a.ContentEncoding = b.ContentEncoding
	a.ContentLanguage = b.ContentLanguage
	a.ContentLength = b.ContentLength
	a.ContentType = b.ContentType
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode
	a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
	a.SSEKMSKeyId = b.SSEKMSKeyId
	a.ServerSideEncryption = b.ServerSideEncryption
	a.StorageClass = b.StorageClass
	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}