From 2944f7603d99aa417764c67b4061374979bf208c Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Wed, 6 Sep 2017 11:13:28 +0100
Subject: [PATCH] s3: read 1000 items in listings #1653

This fixes directory listings with Wasabi, which fail if you supply
more than the allowed 1000 items as a parameter.

rclone used to supply 1024 items, which exceeds the spec - this works
fine with s3/ceph/etc but fails with Wasabi.
---
 s3/s3.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/s3/s3.go b/s3/s3.go
index 0fcefb04a..0ea6fe4c3 100644
--- a/s3/s3.go
+++ b/s3/s3.go
@@ -220,7 +220,7 @@ func init() {
 // Constants
 const (
 	metaMtime      = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
-	listChunkSize  = 1024    // number of items to read at once
+	listChunkSize  = 1000    // number of items to read at once
 	maxRetries     = 10      // number of retries to make of operations
 	maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
 )
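
For reference, below is a minimal, self-contained sketch (not rclone's actual
listing code) of how a chunk size like listChunkSize typically reaches the S3
API: it is passed as the MaxKeys parameter of each ListObjects request while
paging through a bucket. The bucket name and region here are hypothetical.
The S3 API allows at most 1000 keys per request; AWS silently clamps larger
values, but stricter S3-compatible servers such as Wasabi reject them, which
is why the constant must not exceed 1000.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listChunkSize mirrors the constant in the patch: the S3 API permits at
// most 1000 keys per ListObjects request, so asking for 1024 violates the
// spec even though AWS itself tolerates it.
const listChunkSize = 1000

func main() {
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // hypothetical region
	})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// Page through the bucket listChunkSize keys at a time, using the
	// marker from the previous page to continue.
	var marker *string
	for {
		resp, err := svc.ListObjects(&s3.ListObjectsInput{
			Bucket:  aws.String("example-bucket"), // hypothetical bucket
			MaxKeys: aws.Int64(listChunkSize),
			Marker:  marker,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range resp.Contents {
			fmt.Println(*obj.Key)
		}
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
		// NextMarker is only set when a Delimiter is supplied; otherwise
		// the last key of this page serves as the next marker.
		if resp.NextMarker != nil {
			marker = resp.NextMarker
		} else {
			marker = resp.Contents[len(resp.Contents)-1].Key
		}
	}
}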