diff --git a/amazonclouddrive/amazonclouddrive.go b/amazonclouddrive/amazonclouddrive.go
index 9df10ea75..cc6cc287f 100644
--- a/amazonclouddrive/amazonclouddrive.go
+++ b/amazonclouddrive/amazonclouddrive.go
@@ -49,9 +49,7 @@ const (
 var (
 	// Flags
 	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
-	uploadWaitLimit   = pflag.DurationP("acd-upload-wait-limit", "", 60*time.Second, "Don't wait for completed uploads to appear if they took less than this time.")
-	uploadWaitTime    = pflag.DurationP("acd-upload-wait-time", "", 2*60*time.Second, "Time to wait after a failed complete upload to see if it appears.")
-	uploadWaitPerGB   = pflag.DurationP("acd-upload-wait-per-gb", "", 30*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
+	uploadWaitPerGB   = pflag.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
 	// Description of how to auth for this app
 	acdConfig = &oauth2.Config{
 		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -491,28 +489,35 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
 	// 	return false, inInfo, inErr
 	// }
+
+	// The HTTP status
+	httpStatus := "HTTP status UNKNOWN"
+	if resp != nil {
+		httpStatus = resp.Status
+	}
+
 	// check to see if we read to the end
 	buf := make([]byte, 1)
 	n, err := in.Read(buf)
 	if !(n == 0 && err == io.EOF) {
-		fs.Debug(src, "Upload error detected but didn't finish upload: %v", inErr)
+		fs.Debug(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
 		return false, inInfo, inErr
 	}
-	// Only wait for items which have been in transit for > uploadWaitLimit
-	if uploadTime < *uploadWaitLimit {
-		fs.Debug(src, "Upload error detected but not waiting since it only took %v to upload: %v", uploadTime, inErr)
+	// Don't wait for uploads - assume they will appear later
+	if *uploadWaitPerGB <= 0 {
+		fs.Debug(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
 		return false, inInfo, inErr
 	}
 	// Time we should wait for the upload
 	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
-	timeToWait := time.Duration(uploadWaitPerByte*float64(src.Size())) + *uploadWaitTime
+	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
 	const sleepTime = 5 * time.Second                        // sleep between tries
 	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up
-	fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v", inErr)
+	fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
 	remote := src.Remote()
 	for i := 1; i <= retries; i++ {
 		o, err := f.NewObject(remote)
@@ -532,7 +537,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 		}
 		time.Sleep(sleepTime)
 	}
-	fs.Debug(src, "Giving up waiting for object - returning original error: %v", inErr)
+	fs.Debug(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
 	return false, inInfo, inErr
 }
diff --git a/docs/content/amazonclouddrive.md b/docs/content/amazonclouddrive.md
index 1febc535f..091c0195d 100644
--- a/docs/content/amazonclouddrive.md
+++ b/docs/content/amazonclouddrive.md
@@ -133,20 +133,21 @@ To download files above this threshold, rclone requests a `tempLink`
 which downloads the file through a temporary URL directly from the
 underlying S3 storage.
 
-#### --acd-upload-wait-time=TIME, --acd-upload-wait-per-gb=TIME, --acd-upload-wait-limit=TIME ####
+#### --acd-upload-wait-per-gb=TIME ####
 
 Sometimes Amazon Drive gives an error when a file has been fully
 uploaded but the file appears anyway after a little while. This
 happens sometimes for files over 1GB in size and nearly every time for
-files bigger than 10GB. These parameters control the time rclone waits
+files bigger than 10GB. This parameter controls the time rclone waits
 for the file to appear.
 
-If the upload took less than `--acd-upload-wait-limit` (default 60s),
-then we go ahead an upload it again as that will be quicker.
+The default value for this parameter is 3 minutes per GB, so by
+default it will wait 3 minutes for every GB uploaded to see if the
+file appears.
 
-We wait `--acd-upload-wait-time` (default 2m) for the file to appear,
-with an additional `--acd-upload-wait-per-gb` (default 30s) per GB of
-the uploaded file.
+You can disable this feature by setting it to 0. This may cause
+conflict errors as rclone retries the failed upload but the file will
+most likely appear correctly eventually.
 
 These values were determined empirically by observing lots of uploads
 of big files for a range of file sizes.
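
As a rough sketch (not part of the patch itself), the wait and retry arithmetic that checkUpload uses after this change works out as follows for a hypothetical 10 GiB upload at the default --acd-upload-wait-per-gb of 180s; the 5 second sleep interval comes from the code above.

// Illustrative only: reproduces the timeToWait/retries arithmetic from
// checkUpload for an assumed 10 GiB upload at the default 180s per GB.
package main

import (
	"fmt"
	"time"
)

func main() {
	uploadWaitPerGB := 180 * time.Second // default from the patch
	size := int64(10 << 30)              // assumed 10 GiB upload

	uploadWaitPerByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(size))

	const sleepTime = 5 * time.Second // sleep between tries, as in checkUpload
	retries := int((timeToWait + sleepTime - 1) / sleepTime)

	fmt.Println(timeToWait, retries) // prints: 30m0s 360
}

So with the defaults, a failed 10 GiB upload is polled every 5 seconds for up to 30 minutes before the original error is returned.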