diff --git a/backend/alias/alias_internal_test.go b/backend/alias/alias_internal_test.go
index 4b7d7e750..a79769ab3 100644
--- a/backend/alias/alias_internal_test.go
+++ b/backend/alias/alias_internal_test.go
@@ -15,8 +15,6 @@ import (
 
 var (
 	remoteName = "TestAlias"
-	testPath   = "test"
-	filesPath  = filepath.Join(testPath, "files")
 )
 
 func prepare(t *testing.T, root string) {
diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go
index 3cd6a16de..f1aec3028 100644
--- a/backend/amazonclouddrive/amazonclouddrive.go
+++ b/backend/amazonclouddrive/amazonclouddrive.go
@@ -18,7 +18,6 @@ import (
 	"log"
 	"net/http"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
@@ -40,7 +39,6 @@ import (
 const (
 	folderKind      = "FOLDER"
 	fileKind        = "FILE"
-	assetKind       = "ASSET"
 	statusAvailable = "AVAILABLE"
 	timeFormat      = time.RFC3339 // 2014-03-07T22:31:12.173Z
 	minSleep        = 20 * time.Millisecond
@@ -138,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a acd path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an acd 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index 96db542b7..861bc3740 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -8,7 +8,6 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
-	gohash "hash"
 	"io"
 	"net/http"
 	"path"
@@ -32,16 +31,16 @@ import (
 )
 
 const (
-	apiVersion         = "2017-04-17"
-	minSleep           = 10 * time.Millisecond
-	maxSleep           = 10 * time.Second
-	decayConstant      = 1    // bigger for slower decay, exponential
-	listChunkSize      = 5000 // number of items to read at once
-	modTimeKey         = "mtime"
-	timeFormatIn       = time.RFC3339
-	timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
-	maxTotalParts      = 50000   // in multipart upload
-	maxUncommittedSize = 9 << 30 // can't upload bigger than this
+	apiVersion    = "2017-04-17"
+	minSleep      = 10 * time.Millisecond
+	maxSleep      = 10 * time.Second
+	decayConstant = 1    // bigger for slower decay, exponential
+	listChunkSize = 5000 // number of items to read at once
+	modTimeKey    = "mtime"
+	timeFormatIn  = time.RFC3339
+	timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
+	maxTotalParts = 50000 // in multipart upload
+	// maxUncommittedSize = 9 << 30 // can't upload bigger than this
 )
 
 // Globals
@@ -847,16 +846,6 @@ func (o *Object) Storable() bool {
 	return true
 }
 
-// openFile represents an Object open for reading
-type openFile struct {
-	o     *Object        // Object we are reading for
-	resp  *http.Response // response of the GET
-	body  io.Reader      // reading from here
-	hash  gohash.Hash    // currently accumulating MD5
-	bytes int64          // number of bytes read on this connection
-	eof   bool           // whether we have read end of file
-}
-
 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	getBlobOptions := storage.GetBlobOptions{}
@@ -924,20 +913,6 @@ func init() {
 	}
 }
 
-// urlEncode encodes in with % encoding
-func urlEncode(in string) string {
-	var out bytes.Buffer
-	for i := 0; i < len(in); i++ {
-		c := in[i]
-		if noNeedToEncode[c] {
-			_ = out.WriteByte(c)
-		} else {
-			_, _ = out.WriteString(fmt.Sprintf("%%%2X", c))
-		}
-	}
-	return out.String()
-}
-
 // uploadMultipart uploads a file using multipart upload
 //
 // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
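Review note (not part of the patch): the urlEncode helper removed from azureblob.go had no remaining callers, and its job is covered by the standard library should it ever be needed again. A minimal standalone sketch, assuming net/url is acceptable here:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // url.PathEscape percent-encodes a string for use as a single
        // URL path segment: ' ' becomes %20, '/' becomes %2F, and so on.
        fmt.Println(url.PathEscape("dir name/file #1"))
        // Output: dir%20name%2Ffile%20%231
    }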
diff --git a/backend/box/box.go b/backend/box/box.go
index ef13a28b0..47086a3d4 100644
--- a/backend/box/box.go
+++ b/backend/box/box.go
@@ -16,7 +16,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strconv"
 	"strings"
 	"time"
@@ -135,9 +134,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a box path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an box 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go
index 75464bfb7..06965b154 100644
--- a/backend/cache/storage_persistent.go
+++ b/backend/cache/storage_persistent.go
@@ -1070,10 +1070,3 @@ func itob(v int64) []byte {
 func btoi(d []byte) int64 {
 	return int64(binary.BigEndian.Uint64(d))
 }
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
diff --git a/backend/drive/upload.go b/backend/drive/upload.go
index 7a6d68bdb..0a6a1e341 100644
--- a/backend/drive/upload.go
+++ b/backend/drive/upload.go
@@ -30,9 +30,6 @@ import (
 const (
 	// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
 	statusResumeIncomplete = 308
-
-	// Number of times to try each chunk
-	maxTries = 10
 )
 
 // resumableUpload is used by the generated APIs to provide resumable uploads.
@@ -192,7 +189,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
 	}
 
 // Upload uploads the chunks from the input
-// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
+// It retries each chunk using the pacer and --low-level-retries
 func (rx *resumableUpload) Upload() (*drive.File, error) {
 	start := int64(0)
 	var StatusCode int
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
index 9ba148971..b93849352 100644
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -419,21 +419,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	return entries, nil
 }
 
-// A read closer which doesn't close the input
-type readCloser struct {
-	in io.Reader
-}
-
-// Read bytes from the object - see io.Reader
-func (rc *readCloser) Read(p []byte) (n int, err error) {
-	return rc.in.Read(p)
-}
-
-// Dummy close function
-func (rc *readCloser) Close() error {
-	return nil
-}
-
 // Put the object
 //
 // Copy the reader in to the new object which is returned
diff --git a/backend/mega/mega.go b/backend/mega/mega.go
index e197666df..fc7be7344 100644
--- a/backend/mega/mega.go
+++ b/backend/mega/mega.go
@@ -19,7 +19,6 @@ import (
 	"fmt"
 	"io"
 	"path"
-	"regexp"
 	"strings"
 	"sync"
 	"time"
@@ -114,9 +113,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a mega path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an mega 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index 8834dbb8f..bcea113ea 100644
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -12,7 +12,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
@@ -280,9 +279,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a one drive path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an one drive 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
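Review note (not part of the patch): the readCloser type removed from dropbox.go duplicated behaviour the standard library already provides. A minimal standalone sketch of the stdlib equivalent:

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"
    )

    func main() {
        // ioutil.NopCloser wraps an io.Reader with a Close method that
        // does nothing and always returns nil - exactly what the deleted
        // readCloser did.
        rc := ioutil.NopCloser(strings.NewReader("payload"))
        b, _ := ioutil.ReadAll(rc)
        fmt.Println(string(b), rc.Close()) // payload <nil>
    }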
diff --git a/backend/onedrive/quickxorhash/quickxorhash.go b/backend/onedrive/quickxorhash/quickxorhash.go
index 7fb1bbe07..b7976b76f 100644
--- a/backend/onedrive/quickxorhash/quickxorhash.go
+++ b/backend/onedrive/quickxorhash/quickxorhash.go
@@ -44,7 +44,6 @@ const (
 	Size           = 20
 	bitsInLastCell = 32
 	shift          = 11
-	threshold      = 600
 	widthInBits    = 8 * Size
 	dataSize       = (widthInBits-1)/64 + 1
 )
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index e45c0f4e2..936842cec 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -17,7 +17,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
@@ -130,9 +129,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a pcloud path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an pcloud 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go
index 032e4357e..0640524ce 100644
--- a/backend/qingstor/upload.go
+++ b/backend/qingstor/upload.go
@@ -19,10 +19,10 @@ import (
 )
 
 const (
-	maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
-	maxMultiPartSize  = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
-	minMultiPartSize  = 1024 * 1024 * 4        // The minimum allowed part size when uploading a part to QingStor
-	maxMultiParts     = 10000                  // The maximum allowed number of parts in an multi-part upload
+	// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
+	// maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
+	minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
+	maxMultiParts    = 10000           // The maximum allowed number of parts in an multi-part upload
 )
 
 const (
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 9daeb995b..6c98ac445 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -24,7 +24,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
@@ -143,15 +142,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a webdav path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
-// parsePath parses an webdav 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
-	return
-}
-
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
 	429, // Too Many Requests.
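Review note (not part of the patch): the same dead matcher regexp is deleted from six backends in this change; it was unused because each backend's parsePath only ever trims slashes. A minimal standalone sketch of that surviving behaviour:

    package main

    import (
        "fmt"
        "strings"
    )

    // parsePath mirrors the helper the backends keep: strip leading and
    // trailing slashes, nothing more - no regexp needed.
    func parsePath(path string) string {
        return strings.Trim(path, "/")
    }

    func main() {
        fmt.Println(parsePath("/bucket/dir/")) // bucket/dir
    }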
diff --git a/backend/yandex/api/api_request.go b/backend/yandex/api/api_request.go
deleted file mode 100644
index 5ce3d6b55..000000000
--- a/backend/yandex/api/api_request.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package src
-
-type apiRequest interface {
-	Request() *HTTPRequest
-}
diff --git a/backend/yandex/api/http_request.go b/backend/yandex/api/http_request.go
index 640b37981..be04c848a 100644
--- a/backend/yandex/api/http_request.go
+++ b/backend/yandex/api/http_request.go
@@ -12,10 +12,6 @@ func createGetRequest(client *Client, path string, params map[string]interface{}
 	return createRequest(client, "GET", path, params)
 }
 
-func createPostRequest(client *Client, path string, params map[string]interface{}) *HTTPRequest {
-	return createRequest(client, "POST", path, params)
-}
-
 func createRequest(client *Client, method string, path string, parameters map[string]interface{}) *HTTPRequest {
 	var headers = make(map[string][]string)
 	headers["Authorization"] = []string{"OAuth " + client.token}
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 396803594..6e0d19fe6 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -50,7 +50,6 @@ var (
 	errorUncategorized      = errors.New("uncategorized error")
 	errorNotEnoughArguments = errors.New("not enough arguments")
 	errorTooManyArguents    = errors.New("too many arguments")
-	errorUsageError         = errors.New("usage error")
 )
 
 const (
diff --git a/fs/fserrors/closed_conn_unsupported.go b/fs/fserrors/closed_conn_unsupported.go
deleted file mode 100644
index 379d181ad..000000000
--- a/fs/fserrors/closed_conn_unsupported.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build plan9
-
-package fserrors
-
-// isClosedConnErrorPlatform reports whether err is an error from use
-// of a closed network connection using platform specific error codes.
-func isClosedConnErrorPlatform(err error) bool {
-	return false
-}
diff --git a/fs/operations/operations.go b/fs/operations/operations.go
index ae12748fa..90fb74a4f 100644
--- a/fs/operations/operations.go
+++ b/fs/operations/operations.go
@@ -500,22 +500,6 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
 	return DeleteFilesWithBackupDir(toBeDeleted, nil)
 }
 
-// Read a Objects into add() for the given Fs.
-// dir is the start directory, "" for root
-// If includeAll is specified all files will be added,
-// otherwise only files passing the filter will be added.
-//
-// Each object is passed ito the function provided. If that returns
-// an error then the listing will be aborted and that error returned.
-func readFilesFn(f fs.Fs, includeAll bool, dir string, add func(fs.Object) error) (err error) {
-	return walk.Walk(f, "", includeAll, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
-		if err != nil {
-			return err
-		}
-		return entries.ForObjectError(add)
-	})
-}
-
 // SameConfig returns true if fdst and fsrc are using the same config
 // file entry
 func SameConfig(fdst, fsrc fs.Info) bool {
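Review note (not part of the patch): readFilesFn removed from operations.go was a thin wrapper over walk.Walk, and, as the deleted body shows, it ignored its dir argument and always walked from the root. Any future caller can inline the call; a sketch reusing only names visible in the deleted code (the import path is an assumption for this vintage of the repository):

    package operations

    import (
        "github.com/ncw/rclone/fs"
        "github.com/ncw/rclone/fs/walk"
    )

    // readFiles passes every object under dir to add, aborting the walk
    // on the first error - the contract the deleted readFilesFn documented,
    // but actually honouring dir.
    func readFiles(f fs.Fs, includeAll bool, dir string, add func(fs.Object) error) error {
        return walk.Walk(f, dir, includeAll, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
            if err != nil {
                return err
            }
            return entries.ForObjectError(add)
        })
    }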