From 725ae9138708611a301c0e42f868e23dae212876 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Tue, 1 Sep 2020 18:53:29 +0100
Subject: [PATCH] s3: reduce the default --s3-copy-cutoff to < 5GB

The maximum value for --s3-copy-cutoff should be 5 GiB as tested with
AWS S3.

However b2 has implemented this limit as 5 GB rather than 5 GiB, so
with the default at 5 GiB the b2 S3 server side copy of a large file
fails by default.

This patch sets the default to 4768 MiB which is slightly less than
5 GB.

This should have very little effect on anything.

In future rclone could lower this limit further if Copy can
multithread.

See: https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
---
 backend/s3/s3.go | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 430f06ea6..64f09b267 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -1064,10 +1064,12 @@ This option controls how often unused buffers will be removed from the pool.`,
 
 // Constants
 const (
-	metaMtime           = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
-	metaMD5Hash         = "Md5chksum"                   // the meta key to store md5hash in
-	maxSizeForCopy      = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
-	maxUploadParts      = 10000                         // maximum allowed number of parts in a multi-part upload
+	metaMtime   = "Mtime"     // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
+	// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
+	// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
+	maxSizeForCopy      = 4768 * 1024 * 1024
+	maxUploadParts      = 10000 // maximum allowed number of parts in a multi-part upload
 	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)
 	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
 	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
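
As a quick sanity check of the arithmetic above, here is a minimal standalone Go
sketch (not part of the patch; the constants fiveGB and fiveGiB are illustrative
names, not rclone identifiers) showing that 4768 MiB sits just under b2's 5 GB
limit while also remaining under the 5 GiB AWS S3 limit.

	package main

	import "fmt"

	func main() {
		// The new default from the patch: 4768 MiB.
		const maxSizeForCopy = 4768 * 1024 * 1024 // 4,999,610,368 bytes

		// Illustrative limits (assumed names, not from rclone):
		const fiveGB = 5 * 1000 * 1000 * 1000  // b2's interpretation of the copy limit
		const fiveGiB = 5 * 1024 * 1024 * 1024 // AWS S3's documented single COPY limit

		fmt.Println(maxSizeForCopy < fiveGB)  // true - below b2's limit
		fmt.Println(maxSizeForCopy < fiveGiB) // true - below the AWS S3 limit
	}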