Mirror of https://github.com/rclone/rclone.git (synced 2024-11-22 09:32:29 +08:00)
build: remove unused code spotted by the deadcode linter

commit 1320e84bc2 · parent cb5bd47e61
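Every hunk below removes a declaration that nothing reachable refers to. For orientation, a minimal sketch of the kind of code a deadcode linter flags (hypothetical names, not from this commit):

```go
package main

import "fmt"

// used is reachable from main, so a deadcode linter keeps it.
func used() string { return "alive" }

// unused is never called from any entry point; a deadcode linter
// reports it as removable - the class of code this commit deletes.
func unused() string { return "dead" }

func main() {
	fmt.Println(used())
}
```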
```diff
@@ -15,8 +15,6 @@ import (
 
 var (
 	remoteName = "TestAlias"
-	testPath   = "test"
-	filesPath  = filepath.Join(testPath, "files")
 )
 
 func prepare(t *testing.T, root string) {
```
```diff
@@ -18,7 +18,6 @@ import (
 	"log"
 	"net/http"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
```
```diff
@@ -40,7 +39,6 @@ import (
 const (
 	folderKind      = "FOLDER"
 	fileKind        = "FILE"
-	assetKind       = "ASSET"
 	statusAvailable = "AVAILABLE"
 	timeFormat      = time.RFC3339 // 2014-03-07T22:31:12.173Z
 	minSleep        = 20 * time.Millisecond
```
```diff
@@ -138,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a acd path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an acd 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
```
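Here and in several backends below, `matcher` was dead because `parsePath` never consults it; the function only trims slashes. A standalone sketch of the kept behaviour, assuming nothing beyond what the hunk shows:

```go
package main

import (
	"fmt"
	"strings"
)

// parsePath mirrors the helper the diff keeps: the root is the
// remote path with leading and trailing slashes removed.
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

func main() {
	fmt.Println(parsePath("/bucket/dir/")) // bucket/dir
	fmt.Println(parsePath("dir"))          // dir
}
```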
```diff
@@ -8,7 +8,6 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
-	gohash "hash"
 	"io"
 	"net/http"
 	"path"
```
```diff
@@ -32,16 +31,16 @@ import (
 )
 
 const (
 	apiVersion         = "2017-04-17"
 	minSleep           = 10 * time.Millisecond
 	maxSleep           = 10 * time.Second
 	decayConstant      = 1    // bigger for slower decay, exponential
 	listChunkSize      = 5000 // number of items to read at once
 	modTimeKey         = "mtime"
 	timeFormatIn       = time.RFC3339
 	timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
 	maxTotalParts      = 50000 // in multipart upload
-	maxUncommittedSize = 9 << 30 // can't upload bigger than this
+	// maxUncommittedSize = 9 << 30 // can't upload bigger than this
 )
 
 // Globals
```
```diff
@@ -847,16 +846,6 @@ func (o *Object) Storable() bool {
 	return true
 }
 
-// openFile represents an Object open for reading
-type openFile struct {
-	o     *Object        // Object we are reading for
-	resp  *http.Response // response of the GET
-	body  io.Reader      // reading from here
-	hash  gohash.Hash    // currently accumulating MD5
-	bytes int64          // number of bytes read on this connection
-	eof   bool           // whether we have read end of file
-}
-
 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	getBlobOptions := storage.GetBlobOptions{}
```
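The deleted openFile type bundled a response body with a running MD5. The same effect is commonly had with io.TeeReader; a hedged sketch of that general pattern (not the backend's actual replacement code):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("blob contents") // stands in for an HTTP response body
	hasher := md5.New()

	// TeeReader feeds every byte read from body into hasher,
	// which is what openFile's hash field accumulated by hand.
	rd := io.TeeReader(body, hasher)

	data, err := io.ReadAll(rd)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data), hex.EncodeToString(hasher.Sum(nil)))
}
```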
```diff
@@ -924,20 +913,6 @@ func init() {
 	}
 }
 
-// urlEncode encodes in with % encoding
-func urlEncode(in string) string {
-	var out bytes.Buffer
-	for i := 0; i < len(in); i++ {
-		c := in[i]
-		if noNeedToEncode[c] {
-			_ = out.WriteByte(c)
-		} else {
-			_, _ = out.WriteString(fmt.Sprintf("%%%2X", c))
-		}
-	}
-	return out.String()
-}
-
 // uploadMultipart uploads a file using multipart upload
 //
 // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
```
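The removed urlEncode percent-encoded every byte outside a noNeedToEncode whitelist. The standard library offers close, though not identical, behaviour; a sketch for comparison only:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.PathEscape percent-encodes bytes that are unsafe in a URL
	// path segment - similar in spirit to the removed helper, though
	// its whitelist is fixed by RFC 3986 rather than by the backend's
	// noNeedToEncode table.
	fmt.Println(url.PathEscape("dir name/file#1")) // dir%20name%2Ffile%231
}
```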
```diff
@@ -16,7 +16,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strconv"
 	"strings"
 	"time"
```
```diff
@@ -135,9 +134,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a box path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an box 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
```
backend/cache/storage_persistent.go (7 changes, vendored)

```diff
@@ -1070,10 +1070,3 @@ func itob(v int64) []byte {
 func btoi(d []byte) int64 {
 	return int64(binary.BigEndian.Uint64(d))
 }
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
```
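The deleted cloneBytes has well-known one-line equivalents, which is presumably why it went unused; a sketch (bytes.Clone needs Go 1.20+):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	v := []byte("value")

	// Idiomatic copy without a helper, works on any Go version.
	clone1 := append([]byte(nil), v...)

	// Since Go 1.20 the standard library also has bytes.Clone.
	clone2 := bytes.Clone(v)

	fmt.Println(string(clone1), string(clone2))
}
```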
```diff
@@ -30,9 +30,6 @@ import (
 const (
 	// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
 	statusResumeIncomplete = 308
-
-	// Number of times to try each chunk
-	maxTries = 10
 )
 
 // resumableUpload is used by the generated APIs to provide resumable uploads.
```
```diff
@@ -192,7 +189,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
 }
 
 // Upload uploads the chunks from the input
-// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
+// It retries each chunk using the pacer and --low-level-retries
 func (rx *resumableUpload) Upload() (*drive.File, error) {
 	start := int64(0)
 	var StatusCode int
```
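The comment change records that chunk retries moved from a local maxTries loop to rclone's pacer, which honours --low-level-retries; the maxTries constant above became dead as a result. A generic sketch of that retry shape (hypothetical helper, not rclone's pacer API):

```go
package main

import (
	"errors"
	"fmt"
)

// callWithRetries stands in for a pacer: it retries fn until it
// succeeds or stops asking for a retry, up to lowLevelRetries attempts.
func callWithRetries(lowLevelRetries int, fn func() (retry bool, err error)) error {
	var err error
	for try := 0; try < lowLevelRetries; try++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
	}
	return err
}

func main() {
	attempts := 0
	err := callWithRetries(10, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("transient") // retryable failure
		}
		return false, nil // chunk transferred
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```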
```diff
@@ -419,21 +419,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	return entries, nil
 }
 
-// A read closer which doesn't close the input
-type readCloser struct {
-	in io.Reader
-}
-
-// Read bytes from the object - see io.Reader
-func (rc *readCloser) Read(p []byte) (n int, err error) {
-	return rc.in.Read(p)
-}
-
-// Dummy close function
-func (rc *readCloser) Close() error {
-	return nil
-}
-
 // Put the object
 //
 // Copy the reader in to the new object which is returned
```
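The removed readCloser reimplemented a no-op Close around an io.Reader, something the standard library already provides, which presumably left the local type unused:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	var r io.Reader = strings.NewReader("payload")

	// io.NopCloser (ioutil.NopCloser before Go 1.16) wraps a Reader
	// in a ReadCloser whose Close does nothing - the same contract
	// as the deleted readCloser type.
	rc := io.NopCloser(r)
	defer rc.Close()

	b, _ := io.ReadAll(rc)
	fmt.Println(string(b))
}
```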
```diff
@@ -19,7 +19,6 @@ import (
 	"fmt"
 	"io"
 	"path"
-	"regexp"
 	"strings"
 	"sync"
 	"time"
```
```diff
@@ -114,9 +113,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a mega path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an mega 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
```
```diff
@@ -12,7 +12,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
```
```diff
@@ -280,9 +279,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a one drive path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an one drive 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
```
```diff
@@ -44,7 +44,6 @@ const (
 	Size           = 20
 	bitsInLastCell = 32
 	shift          = 11
-	threshold      = 600
 	widthInBits    = 8 * Size
 	dataSize       = (widthInBits-1)/64 + 1
 )
```
```diff
@@ -17,7 +17,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
```
```diff
@@ -130,9 +129,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a pcloud path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses an pcloud 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
```
```diff
@@ -19,10 +19,10 @@ import (
 )
 
 const (
-	maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
-	maxMultiPartSize  = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
+	// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
+	// maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
 	minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
 	maxMultiParts    = 10000           // The maximum allowed number of parts in an multi-part upload
 )
 
 const (
```
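The surviving constants bound multipart uploads: each part must be at least minMultiPartSize, and there can be at most maxMultiParts of them. An illustrative sketch of the part-size arithmetic those bounds imply (not the backend's actual code):

```go
package main

import "fmt"

const (
	minMultiPartSize = 1024 * 1024 * 4 // 4 MiB, from the diff
	maxMultiParts    = 10000           // from the diff
)

// partSize picks the smallest allowed part size that still fits
// the object into maxMultiParts parts.
func partSize(objectSize int64) int64 {
	size := int64(minMultiPartSize)
	if perPart := (objectSize + maxMultiParts - 1) / maxMultiParts; perPart > size {
		size = perPart
	}
	return size
}

func main() {
	fmt.Println(partSize(100 << 20)) // small object: minimum 4 MiB parts
	fmt.Println(partSize(100 << 30)) // 100 GiB: parts grow to stay under 10000
}
```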
```diff
@@ -24,7 +24,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
```
```diff
@@ -143,15 +142,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a webdav path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
-// parsePath parses an webdav 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
-	return
-}
-
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
 	429, // Too Many Requests.
```
```diff
@@ -1,5 +0,0 @@
-package src
-
-type apiRequest interface {
-	Request() *HTTPRequest
-}
```
```diff
@@ -12,10 +12,6 @@ func createGetRequest(client *Client, path string, params map[string]interface{}
 	return createRequest(client, "GET", path, params)
 }
 
-func createPostRequest(client *Client, path string, params map[string]interface{}) *HTTPRequest {
-	return createRequest(client, "POST", path, params)
-}
-
 func createRequest(client *Client, method string, path string, parameters map[string]interface{}) *HTTPRequest {
 	var headers = make(map[string][]string)
 	headers["Authorization"] = []string{"OAuth " + client.token}
```
```diff
@@ -50,7 +50,6 @@ var (
-	errorUncategorized      = errors.New("uncategorized error")
 	errorNotEnoughArguments = errors.New("not enough arguments")
 	errorTooManyArguents    = errors.New("too many arguments")
 	errorUsageError         = errors.New("usage error")
 )
 
 const (
```
```diff
@@ -1,9 +0,0 @@
-// +build plan9
-
-package fserrors
-
-// isClosedConnErrorPlatform reports whether err is an error from use
-// of a closed network connection using platform specific error codes.
-func isClosedConnErrorPlatform(err error) bool {
-	return false
-}
```
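The deleted file was a plan9-only stub selected by a build constraint. For reference, the pattern looks like this; the modern //go:build line is an addition here, and the file name is hypothetical:

```go
// isclosed_plan9.go - compiled only on plan9.

//go:build plan9
// +build plan9

package fserrors

// isClosedConnErrorPlatform reports whether err is an error from use
// of a closed network connection using platform specific error codes.
// The plan9 stub always answers false; other platforms supply real
// implementations in their own constrained files.
func isClosedConnErrorPlatform(err error) bool {
	return false
}
```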
```diff
@@ -500,22 +500,6 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
 	return DeleteFilesWithBackupDir(toBeDeleted, nil)
 }
 
-// Read a Objects into add() for the given Fs.
-// dir is the start directory, "" for root
-// If includeAll is specified all files will be added,
-// otherwise only files passing the filter will be added.
-//
-// Each object is passed ito the function provided. If that returns
-// an error then the listing will be aborted and that error returned.
-func readFilesFn(f fs.Fs, includeAll bool, dir string, add func(fs.Object) error) (err error) {
-	return walk.Walk(f, "", includeAll, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
-		if err != nil {
-			return err
-		}
-		return entries.ForObjectError(add)
-	})
-}
-
 // SameConfig returns true if fdst and fsrc are using the same config
 // file entry
 func SameConfig(fdst, fsrc fs.Info) bool {
```