From d0888edc0a229c7858cee1a5c9c851b5c51642a2 Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Thu, 8 Oct 2020 20:17:24 -0400 Subject: [PATCH] Spelling fixes Fix spelling of: above, already, anonymous, associated, authentication, bandwidth, because, between, blocks, calculate, candidates, cautious, changelog, cleaner, clipboard, command, completely, concurrently, considered, constructs, corrupt, current, daemon, dependencies, deprecated, directory, dispatcher, download, eligible, ellipsis, encrypter, endpoint, entrieslist, essentially, existing writers, existing, expires, filesystem, flushing, frequently, hierarchy, however, implementation, implements, inaccurate, individually, insensitive, longer, maximum, metadata, modified, multipart, namedirfirst, nextcloud, obscured, opened, optional, owncloud, pacific, passphrase, password, permanently, permissions, positive, potato, protocol, quota, receiving, recommends, referring, requires, revisited, satisfied, satisfies, satisfy, semver, serialized, session, storage, strategies, stringlist, successful, supported, surprise, temporarily, temporary, transactions, unneeded, update, uploads, wrapped Signed-off-by: Josh Soref --- CONTRIBUTING.md | 2 +- backend/azureblob/azureblob.go | 8 ++++---- backend/box/box.go | 2 +- backend/box/upload.go | 2 +- backend/crypt/cipher.go | 2 +- backend/crypt/crypt.go | 2 +- backend/crypt/crypt_internal_test.go | 2 +- backend/drive/drive.go | 4 ++-- backend/dropbox/dropbox.go | 2 +- backend/fichier/fichier.go | 4 ++-- backend/googlephotos/googlephotos_test.go | 2 +- backend/hubic/hubic.go | 2 +- backend/jottacloud/api/types.go | 6 +++--- backend/jottacloud/jottacloud.go | 12 ++++++------ backend/mailru/mailru.go | 4 ++-- backend/memory/memory.go | 2 +- backend/onedrive/api/types.go | 2 +- backend/pcloud/pcloud.go | 2 +- backend/putio/fs.go | 4 ++-- backend/putio/object.go | 2 +- backend/qingstor/qingstor.go | 2 +- backend/s3/s3.go | 6 +++--- backend/sftp/sftp.go | 2 +- backend/sharefile/api/types.go | 2 +- backend/sharefile/sharefile.go | 2 +- backend/sugarsync/sugarsync.go | 2 +- backend/tardigrade/fs.go | 4 ++-- backend/union/policy/epall.go | 2 +- backend/union/policy/epmfs.go | 2 +- backend/union/policy/ff.go | 2 +- backend/union/union.go | 18 +++++++++--------- backend/union/upstream/upstream.go | 2 +- backend/webdav/webdav_test.go | 2 +- backend/yandex/api/types.go | 2 +- bin/make_changelog.py | 2 +- bin/make_test_files.go | 8 ++++---- cmd/cmd.go | 2 +- cmd/cmount/fs.go | 2 +- cmd/config/config.go | 2 +- cmd/info/info.go | 2 +- cmd/mount2/file.go | 2 +- cmd/mount2/node.go | 2 +- cmd/ncdu/ncdu.go | 2 +- cmd/serve/http/http_test.go | 2 +- cmd/serve/httplib/httplib.go | 2 +- cmd/serve/httplib/serve/dir.go | 2 +- cmd/serve/proxy/proxy.go | 2 +- cmd/touch/touch_test.go | 2 +- docs/content/changelog.md | 14 +++++++------- docs/content/install.md | 2 +- docs/content/jottacloud.md | 6 +++--- docs/content/overview.md | 2 +- docs/content/qingstor.md | 2 +- docs/content/s3.md | 8 ++++---- docs/content/tardigrade.md | 2 +- fs/accounting/accounting.go | 2 +- fs/accounting/token_bucket.go | 2 +- fs/accounting/transfermap.go | 6 +++--- fs/bwtimetable.go | 2 +- fs/chunkedreader/chunkedreader.go | 2 +- fs/config.go | 2 +- fs/config/config_test.go | 2 +- fs/operations/multithread.go | 14 +++++++------- fs/sync/pipe.go | 2 +- fs/sync/sync.go | 2 +- fs/walk/walk.go | 2 +- fstest/run.go | 2 +- fstest/test_all/config.go | 2 +- fstest/testserver/init.d/TestWebdavOwncloud | 2 +- lib/encoder/encoder.go | 2 +- lib/random/random.go | 2 +- 
lib/ranges/ranges.go | 2 +- lib/structs/structs.go | 2 +- lib/structs/structs_test.go | 6 +++--- notes.txt | 2 +- vfs/dir.go | 6 +++--- vfs/dir_handle.go | 2 +- vfs/file.go | 6 +++--- vfs/help.go | 2 +- vfs/read.go | 2 +- vfs/read_write.go | 2 +- vfs/vfs.go | 2 +- vfs/vfs_test.go | 2 +- vfs/vfscache/cache.go | 10 +++++----- vfs/vfscache/cache_test.go | 16 ++++++++-------- vfs/vfscache/downloaders/downloaders.go | 4 ++-- vfs/vfscache/item.go | 8 ++++---- vfs/vfscache/item_test.go | 2 +- vfs/vfscache/writeback/writeback.go | 4 ++-- vfs/vfstest/dir.go | 2 +- vfs/write.go | 4 ++-- 91 files changed, 161 insertions(+), 161 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 32a86b027..5fa8d96fd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together. git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo. git commit # Add a new commit message. git push --force # Push the squashed commit to your GitHub repo. -# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com +# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com ``` ## CI for your fork ## diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 96a20fde1..a9ceec9d6 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -1245,15 +1245,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read } blob := o.getBlobReference() ac := azblob.BlobAccessConditions{} - var dowloadResponse *azblob.DownloadResponse + var downloadResponse *azblob.DownloadResponse err = o.fs.pacer.Call(func() (bool, error) { - dowloadResponse, err = blob.Download(ctx, offset, count, ac, false) + downloadResponse, err = blob.Download(ctx, offset, count, ac, false) return o.fs.shouldRetry(err) }) if err != nil { return nil, errors.Wrap(err, "failed to open for download") } - in = dowloadResponse.Body(azblob.RetryReaderOptions{}) + in = downloadResponse.Body(azblob.RetryReaderOptions{}) return in, nil } @@ -1475,7 +1475,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op } // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75 // is merged the SDK can't upload a single blob of exactly the chunk - // size, so upload with a multpart upload to work around. + // size, so upload with a multipart upload to work around. 
// See: https://github.com/rclone/rclone/issues/2653 multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff) if size == int64(o.fs.opt.ChunkSize) { diff --git a/backend/box/box.go b/backend/box/box.go index 7a9f34804..2de3c7711 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, return info.SharedLink.URL, err } -// deletePermanently permenently deletes a trashed file +// deletePermanently permanently deletes a trashed file func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error { opts := rest.Opts{ Method: "DELETE", diff --git a/backend/box/upload.go b/backend/box/upload.go index 73390e66b..649198a52 100644 --- a/backend/box/upload.go +++ b/backend/box/upload.go @@ -1,4 +1,4 @@ -// multpart upload for box +// multipart upload for box package box diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index 11f330e5f..77dc34643 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo // If salt is "" we use a fixed salt just to make attackers lives // slighty harder than using no salt. // -// Note that empty passsword makes all 0x00 keys which is used in the +// Note that empty password makes all 0x00 keys which is used in the // tests. func (c *Cipher) Key(password, salt string) (err error) { const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak) diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index 0ba57d9e4..030c5d252 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -159,7 +159,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) { if strings.HasPrefix(remote, name+":") { return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting") } - // Make sure to remove trailing . reffering to the current dir + // Make sure to remove trailing . referring to the current dir if path.Base(rpath) == "." { rpath = strings.TrimSuffix(rpath, ".") } diff --git a/backend/crypt/crypt_internal_test.go b/backend/crypt/crypt_internal_test.go index 38e064977..554c6c57b 100644 --- a/backend/crypt/crypt_internal_test.go +++ b/backend/crypt/crypt_internal_test.go @@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) { } // wrap the object in a crypt for upload using the nonce we - // saved from the encryptor + // saved from the encrypter src := f.newObjectInfo(oi, nonce) // Test ObjectInfo methods diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 07a4b92e3..18d47c928 100755 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -2026,10 +2026,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - exisitingObj, err := f.NewObject(ctx, src.Remote()) + existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: - return exisitingObj, exisitingObj.Update(ctx, in, src, options...) + return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) 
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index c3d707f43..0e2399be2 100755 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -1193,7 +1193,7 @@ func (o *Object) Size() int64 { // setMetadataFromEntry sets the fs data from a files.FileMetadata // -// This isn't a complete set of metadata and has an inacurate date +// This isn't a complete set of metadata and has an inaccurate date func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error { o.id = info.Id o.bytes = int64(info.Size) diff --git a/backend/fichier/fichier.go b/backend/fichier/fichier.go index b5cc3b1ae..31f17088a 100644 --- a/backend/fichier/fichier.go +++ b/backend/fichier/fichier.go @@ -306,10 +306,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - exisitingObj, err := f.NewObject(ctx, src.Remote()) + existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: - return exisitingObj, exisitingObj.Update(ctx, in, src, options...) + return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) diff --git a/backend/googlephotos/googlephotos_test.go b/backend/googlephotos/googlephotos_test.go index 3b2b6f2e5..65e811346 100644 --- a/backend/googlephotos/googlephotos_test.go +++ b/backend/googlephotos/googlephotos_test.go @@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) { assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String()) }) - // Check it is there in the date/month/year heirachy + // Check it is there in the date/month/year hierarchy // 2013-07-13 is the creation date of the folder checkPresent := func(t *testing.T, objPath string) { entries, err := f.List(ctx, objPath) diff --git a/backend/hubic/hubic.go b/backend/hubic/hubic.go index daab254fa..92a977ad8 100644 --- a/backend/hubic/hubic.go +++ b/backend/hubic/hubic.go @@ -4,7 +4,7 @@ package hubic // This uses the normal swift mechanism to update the credentials and // ignores the expires field returned by the Hubic API. This may need -// to be revisted after some actual experience. +// to be revisited after some actual experience. 
import ( "context" diff --git a/backend/jottacloud/api/types.go b/backend/jottacloud/api/types.go index 06395f06a..993f8e6e3 100644 --- a/backend/jottacloud/api/types.go +++ b/backend/jottacloud/api/types.go @@ -153,9 +153,9 @@ type CustomerInfo struct { AccountType string `json:"account_type"` SubscriptionType string `json:"subscription_type"` Usage int64 `json:"usage"` - Qouta int64 `json:"quota"` + Quota int64 `json:"quota"` BusinessUsage int64 `json:"business_usage"` - BusinessQouta int64 `json:"business_quota"` + BusinessQuota int64 `json:"business_quota"` WriteLocked bool `json:"write_locked"` ReadLocked bool `json:"read_locked"` LockedCause interface{} `json:"locked_cause"` @@ -386,7 +386,7 @@ type Error struct { Cause string `xml:"cause"` } -// Error returns a string for the error and statistifes the error interface +// Error returns a string for the error and satisfies the error interface func (e *Error) Error() string { out := fmt.Sprintf("error %d", e.StatusCode) if e.Message != "" { diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 3cf2b944b..29c307183 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -107,7 +107,7 @@ func init() { } } - fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n") + fmt.Printf("Use legacy authentication?\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n") if config.Confirm(false) { v1config(ctx, name, m) } else { @@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) { return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } -// v1config configure a jottacloud backend using legacy authentification +// v1config configures a jottacloud backend using legacy authentication func v1config(ctx context.Context, name string, m configmap.Mapper) { srv := rest.NewClient(fshttp.NewClient(fs.Config)) @@ -323,7 +323,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis return deviceRegistration, err } -// doAuthV1 runs the actual token request for V1 authentification +// doAuthV1 runs the actual token request for V1 authentication func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) { // prepare out token request with username and password values := url.Values{} @@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) return token, err } -// v2config configure a jottacloud backend using the modern JottaCli token based authentification +// v2config configures a jottacloud backend using the modern JottaCli token based authentication func v2config(ctx context.Context, name string, m configmap.Mapper) { srv := rest.NewClient(fshttp.NewClient(fs.Config)) @@ -405,7 +405,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) { m.Set("configVersion", strconv.Itoa(configVersion)) } -// doAuthV2 runs the actual token request for V2 authentification +// doAuthV2 runs the actual token request for V2 authentication func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) { loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64) if err != nil { @@ -1512,7 +1512,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op 
return err } - // If the file state is INCOMPLETE and CORRPUT, try to upload a then + // If the file state is INCOMPLETE and CORRUPT, try to resume the upload if response.State != "COMPLETED" { // how much do we still have to upload? remainingBytes := size - response.ResumePos diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go index be0386bfd..9c6d6433a 100644 --- a/backend/mailru/mailru.go +++ b/backend/mailru/mailru.go @@ -273,7 +273,7 @@ type Fs struct { root string // root path opt Options // parsed options speedupGlobs []string // list of file name patterns eligible for speedup - speedupAny bool // true if all file names are aligible for speedup + speedupAny bool // true if all file names are eligible for speedup features *fs.Features // optional features srv *rest.Client // REST API client cli *http.Client // underlying HTTP client (for authorize) @@ -2214,7 +2214,7 @@ func (e *endHandler) handle(err error) error { return io.EOF } -// serverPool backs server dispacher +// serverPool backs server dispatcher type serverPool struct { pool pendingServerMap mu sync.Mutex diff --git a/backend/memory/memory.go b/backend/memory/memory.go index 95677c7b8..538878789 100644 --- a/backend/memory/memory.go +++ b/backend/memory/memory.go @@ -221,7 +221,7 @@ func (f *Fs) setRoot(root string) { f.rootBucket, f.rootDirectory = bucket.Split(f.root) } -// NewFs contstructs an Fs from the path, bucket:path +// NewFs constructs an Fs from the path, bucket:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) diff --git a/backend/onedrive/api/types.go b/backend/onedrive/api/types.go index 53140819c..7de8256bd 100644 --- a/backend/onedrive/api/types.go +++ b/backend/onedrive/api/types.go @@ -254,7 +254,7 @@ type MoveItemRequest struct { //Always Type:view and Scope:anonymous for public sharing type CreateShareLinkRequest struct { Type string `json:"type"` //Link type in View, Edit or Embed - Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization + Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization } //CreateShareLinkResponse is the response from CreateShareLinkRequest diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go index 9254b5a69..ce3ebba52 100644 --- a/backend/pcloud/pcloud.go +++ b/backend/pcloud/pcloud.go @@ -1125,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op // Special treatment for a 0 length upload. This doesn't work // with PUT even with Content-Length set (by setting - // opts.Body=0), so upload it as a multpart form POST with + // opts.Body=0), so upload it as a multipart form POST with // Content-Length set. 
if size == 0 { formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf) diff --git a/backend/putio/fs.go b/backend/putio/fs.go index c6f3c0e9e..b0dc3515b 100644 --- a/backend/putio/fs.go +++ b/backend/putio/fs.go @@ -236,10 +236,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err) - exisitingObj, err := f.NewObject(ctx, src.Remote()) + existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: - return exisitingObj, exisitingObj.Update(ctx, in, src, options...) + return existingObj, existingObj.Update(ctx, in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it return f.PutUnchecked(ctx, in, src, options...) diff --git a/backend/putio/object.go b/backend/putio/object.go index 2507f6488..16de2930c 100644 --- a/backend/putio/object.go +++ b/backend/putio/object.go @@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string { // setMetadataFromEntry sets the fs data from a putio.File // -// This isn't a complete set of metadata and has an inacurate date +// This isn't a complete set of metadata and has an inaccurate date func (o *Object) setMetadataFromEntry(info putio.File) error { o.file = &info o.modtime = info.UpdatedAt.Time diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index 20de9ae6e..66e6bd333 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -104,7 +104,7 @@ enough memory, then increasing this will speed up the transfers.`, This is the number of chunks of the same file that are uploaded concurrently. -NB if you set this to > 1 then the checksums of multpart uploads +NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though). If you are uploading small numbers of large file over high speed link diff --git a/backend/s3/s3.go b/backend/s3/s3.go index b3beab1ff..d3694cc2c 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -2364,7 +2364,7 @@ All the objects shown will be marked for restore, then rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard It returns a list of status dictionaries with Remote and Status -keys. The Status will be OK if it was successfull or an error message +keys. The Status will be OK if it was successful or an error message if not. [ @@ -2529,7 +2529,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str // listMultipartUploads lists all outstanding multipart uploads for (bucket, key) // // Note that rather lazily we treat key as a prefix so it matches -// directories and objects. This could suprise the user if they ask +// directories and objects. 
This could surprise the user if they ask // for "dir" and it returns "dirKey" func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) { var ( @@ -3062,7 +3062,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op } // read the md5sum if available - // - for non multpart + // - for non multipart // - so we can add a ContentMD5 // - for multipart provided checksums aren't disabled // - so we can add the md5sum in the metadata as metaMD5Hash diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index a50f1cc3a..5ff2adedc 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -952,7 +952,7 @@ func (f *Fs) run(cmd string) ([]byte, error) { session, err := c.sshClient.NewSession() if err != nil { - return nil, errors.Wrap(err, "run: get SFTP sessiion") + return nil, errors.Wrap(err, "run: get SFTP session") } defer func() { _ = session.Close() diff --git a/backend/sharefile/api/types.go b/backend/sharefile/api/types.go index 282087bf3..655681351 100644 --- a/backend/sharefile/api/types.go +++ b/backend/sharefile/api/types.go @@ -95,7 +95,7 @@ type UploadSpecification struct { ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads - IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted. + IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported. ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true. 
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server diff --git a/backend/sharefile/sharefile.go b/backend/sharefile/sharefile.go index ee392f490..c744efbdc 100644 --- a/backend/sharefile/sharefile.go +++ b/backend/sharefile/sharefile.go @@ -1090,7 +1090,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj } else if err != nil { return nil, errors.Wrap(err, "copy: failed to examine destination dir") } else { - // otherwise need to copy via a temporary directlry + // otherwise need to copy via a temporary directory } } diff --git a/backend/sugarsync/sugarsync.go b/backend/sugarsync/sugarsync.go index b4a71553e..d41ec7f88 100644 --- a/backend/sugarsync/sugarsync.go +++ b/backend/sugarsync/sugarsync.go @@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) { // if have auth, check it is in date if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway { // Get the auth token - f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse + f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse err = f.getAuthToken(ctx) f.srv.SetSigner(f.getAuth) // replace signer if err != nil { diff --git a/backend/tardigrade/fs.go b/backend/tardigrade/fs.go index 8cd20138e..caf203d39 100644 --- a/backend/tardigrade/fs.go +++ b/backend/tardigrade/fs.go @@ -67,12 +67,12 @@ func init() { log.Fatalf("Couldn't create access grant: %v", err) } - serialziedAccess, err := access.Serialize() + serializedAccess, err := access.Serialize() if err != nil { log.Fatalf("Couldn't serialize access grant: %v", err) } configMapper.Set("satellite_address", satellite) - configMapper.Set("access_grant", serialziedAccess) + configMapper.Set("access_grant", serializedAccess) } else if provider == existingProvider { config.FileDeleteKey(name, "satellite_address") config.FileDeleteKey(name, "api_key") diff --git a/backend/union/policy/epall.go b/backend/union/policy/epall.go index caac0b17d..24ce7c6b2 100644 --- a/backend/union/policy/epall.go +++ b/backend/union/policy/epall.go @@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin return p.epall(ctx, upstreams, path) } -// ActionEntries is ACTION category policy but receivng a set of candidate entries +// ActionEntries is ACTION category policy but receiving a set of candidate entries func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/epmfs.go b/backend/union/policy/epmfs.go index 1a2625009..915f9a02a 100644 --- a/backend/union/policy/epmfs.go +++ b/backend/union/policy/epmfs.go @@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin return p.mfs(upstreams) } -// SearchEntries is SEARCH category policy but receivng a set of candidate entries +// SearchEntries is SEARCH category policy but receiving a set of candidate entries func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) { if len(entries) == 0 { return nil, fs.ErrorObjectNotFound diff --git a/backend/union/policy/ff.go b/backend/union/policy/ff.go index 280c81cc2..9899d5d74 100644 --- a/backend/union/policy/ff.go +++ 
b/backend/union/policy/ff.go @@ -14,7 +14,7 @@ func init() { // FF stands for first found // Search category: same as epff. // Action category: same as epff. -// Create category: Given the order of the candiates, act on the first one found. +// Create category: Given the order of the candidates, act on the first one found. type FF struct { EpFF } diff --git a/backend/union/union.go b/backend/union/union.go index 17e988956..fdb447f57 100644 --- a/backend/union/union.go +++ b/backend/union/union.go @@ -60,7 +60,7 @@ func init() { // Options defines the configuration for this backend type Options struct { Upstreams fs.SpaceSepList `config:"upstreams"` - Remotes fs.SpaceSepList `config:"remotes"` // Depreated + Remotes fs.SpaceSepList `config:"remotes"` // Deprecated ActionPolicy string `config:"action_policy"` CreatePolicy string `config:"create_policy"` SearchPolicy string `config:"search_policy"` @@ -567,7 +567,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { - entriess := make([][]upstream.Entry, len(f.upstreams)) + entriesList := make([][]upstream.Entry, len(f.upstreams)) errs := Errors(make([]error, len(f.upstreams))) multithread(len(f.upstreams), func(i int) { u := f.upstreams[i] @@ -580,7 +580,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e for j, e := range entries { uEntries[j], _ = u.WrapEntry(e) } - entriess[i] = uEntries + entriesList[i] = uEntries }) if len(errs) == len(errs.FilterNil()) { errs = errs.Map(func(e error) error { @@ -594,7 +594,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e } return nil, errs.Err() } - return f.mergeDirEntries(entriess) + return f.mergeDirEntries(entriesList) } // ListR lists the objects and directories of the Fs starting @@ -614,7 +614,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { - var entriess [][]upstream.Entry + var entriesList [][]upstream.Entry errs := Errors(make([]error, len(f.upstreams))) var mutex sync.Mutex multithread(len(f.upstreams), func(i int) { @@ -626,7 +626,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( uEntries[j], _ = u.WrapEntry(e) } mutex.Lock() - entriess = append(entriess, uEntries) + entriesList = append(entriesList, uEntries) mutex.Unlock() return nil } @@ -653,7 +653,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( } return errs.Err() } - entries, err := f.mergeDirEntries(entriess) + entries, err := f.mergeDirEntries(entriesList) if err != nil { return err } @@ -724,9 +724,9 @@ func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) { return f.searchPolicy.SearchEntries(entries...) 
} -func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) { +func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) { entryMap := make(map[string]([]upstream.Entry)) - for _, en := range entriess { + for _, en := range entriesList { if en == nil { continue } diff --git a/backend/union/upstream/upstream.go b/backend/union/upstream/upstream.go index 49a993b28..4360b0b25 100644 --- a/backend/union/upstream/upstream.go +++ b/backend/union/upstream/upstream.go @@ -52,7 +52,7 @@ type Object struct { f *Fs } -// Entry describe a warpped fs.DirEntry interface with the +// Entry describes a wrapped fs.DirEntry interface with the // information of upstream Fs type Entry interface { fs.DirEntry diff --git a/backend/webdav/webdav_test.go b/backend/webdav/webdav_test.go index 9c7d82bd3..3b4edcb2d 100644 --- a/backend/webdav/webdav_test.go +++ b/backend/webdav/webdav_test.go @@ -12,7 +12,7 @@ import ( // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ - RemoteName: "TestWebdavNexcloud:", + RemoteName: "TestWebdavNextcloud:", NilObject: (*webdav.Object)(nil), }) } diff --git a/backend/yandex/api/types.go b/backend/yandex/api/types.go index b3d7789ab..978b1d00c 100644 --- a/backend/yandex/api/types.go +++ b/backend/yandex/api/types.go @@ -20,7 +20,7 @@ type ResourceInfoRequestOptions struct { Fields []string } -//ResourceInfoResponse struct is returned by the API for metedata requests. +//ResourceInfoResponse struct is returned by the API for metadata requests. type ResourceInfoResponse struct { PublicKey string `json:"public_key"` Name string `json:"name"` diff --git a/bin/make_changelog.py b/bin/make_changelog.py index 5ee82fd83..02a15d5ce 100755 --- a/bin/make_changelog.py +++ b/bin/make_changelog.py @@ -141,7 +141,7 @@ def main(): for name in sorted(bugfixes.keys()): out(name) - # Read old changlog and split + # Read old changelog and split with open("docs/content/changelog.md") as fd: old_changelog = fd.read() heading = "# Changelog" diff --git a/bin/make_test_files.go b/bin/make_test_files.go index 6fe5311d0..f2fd4b3f5 100644 --- a/bin/make_test_files.go +++ b/bin/make_test_files.go @@ -19,7 +19,7 @@ var ( // Flags numberOfFiles = flag.Int("n", 1000, "Number of files to create") averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory") - maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory heirachy") + maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy") minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create") maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create") minFileNameLength = flag.Int("min-name-length", 4, "Minimum size of file to create") @@ -61,7 +61,7 @@ func fileName() (name string) { return name } -// dir is a directory in the directory heirachy being built up +// dir is a directory in the directory hierarchy being built up type dir struct { name string depth int parent *dir } -// Create a random directory heirachy under d +// Create a random directory hierarchy under d func (d *dir) createDirectories() { for totalDirectories < directoriesToCreate { newDir := &dir{ @@ -91,7 +91,7 @@ func (d *dir) createDirectories() { return } -// list the directory heirachy +// list the directory hierarchy func (d *dir) list(path string, output []string) []string { dirPath := filepath.Join(path, d.name) output = 
append(output, dirPath) diff --git a/cmd/cmd.go b/cmd/cmd.go index df1a208b0..0114a8f4e 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,4 +1,4 @@ -// Package cmd implemnts the rclone command +// Package cmd implements the rclone command // // It is in a sub package so it's internals can be re-used elsewhere package cmd diff --git a/cmd/cmount/fs.go b/cmd/cmount/fs.go index ae91e91ab..34b5b7f66 100644 --- a/cmd/cmount/fs.go +++ b/cmd/cmount/fs.go @@ -270,7 +270,7 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) { return fsys.closeHandle(fh) } -// Statfs reads overall stats on the filessystem +// Statfs reads overall stats on the filesystem func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) { defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc) const blockSize = 4096 diff --git a/cmd/config/config.go b/cmd/config/config.go index 7f7f21231..51ce4746b 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -111,7 +111,7 @@ whether the password is already obscured or not and put unobscured passwords into the config file. If you want to be 100% certain that the passwords get obscured then use the "--obscure" flag, or if you are 100% certain you are already passing obscured passwords then use -"--no-obscure". You can also set osbscured passwords using the +"--no-obscure". You can also set obscured passwords using the "rclone config password" command. ` diff --git a/cmd/info/info.go b/cmd/info/info.go index ed5691887..66642fc70 100644 --- a/cmd/info/info.go +++ b/cmd/info/info.go @@ -49,7 +49,7 @@ func init() { flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.") flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.") flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.") - flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.") + flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.") } var commandDefinition = &cobra.Command{ diff --git a/cmd/mount2/file.go b/cmd/mount2/file.go index 416d5d59d..e40114c10 100644 --- a/cmd/mount2/file.go +++ b/cmd/mount2/file.go @@ -45,7 +45,7 @@ func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle { } } -// Check interface satistfied +// Check interface satisfied var _ fusefs.FileHandle = (*FileHandle)(nil) // The String method is for debug printing. diff --git a/cmd/mount2/node.go b/cmd/mount2/node.go index 8619df10f..e1369607f 100644 --- a/cmd/mount2/node.go +++ b/cmd/mount2/node.go @@ -258,7 +258,7 @@ var _ fusefs.DirStream = (*dirStream)(nil) // Readdir opens a stream of directory entries. // -// Readdir essentiallly returns a list of strings, and it is allowed +// Readdir essentially returns a list of strings, and it is allowed // for Readdir to return different results from Lookup. 
For example, // you can return nothing for Readdir ("ls my-fuse-mount" is empty), // while still implementing Lookup ("ls my-fuse-mount/a-specific-file" diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go index 007c46ceb..7dc94a0fa 100644 --- a/cmd/ncdu/ncdu.go +++ b/cmd/ncdu/ncdu.go @@ -75,7 +75,7 @@ func helpText() (tr []string) { " d delete file/directory", } if !clipboard.Unsupported { - tr = append(tr, " y copy current path to clipbard") + tr = append(tr, " y copy current path to clipboard") } tr = append(tr, []string{ " Y display current path", diff --git a/cmd/serve/http/http_test.go b/cmd/serve/http/http_test.go index e5bc97214..269f56237 100644 --- a/cmd/serve/http/http_test.go +++ b/cmd/serve/http/http_test.go @@ -208,7 +208,7 @@ func TestGET(t *testing.T) { body, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) - // Check we got a Last-Modifed header and that it is a valid date + // Check we got a Last-Modified header and that it is a valid date if test.Status == http.StatusOK || test.Status == http.StatusPartialContent { lastModified := resp.Header.Get("Last-Modified") assert.NotEqual(t, "", lastModified, test.Golden) diff --git a/cmd/serve/httplib/httplib.go b/cmd/serve/httplib/httplib.go index 4e19c14d2..2034908a0 100644 --- a/cmd/serve/httplib/httplib.go +++ b/cmd/serve/httplib/httplib.go @@ -61,7 +61,7 @@ to be used within the template to server pages: | .Name | The full path of a file/directory. | | .Title | Directory listing of .Name | | .Sort | The current sort used. This is changeable via ?sort= parameter | -| | Sort Options: namedirfist,name,size,time (default namedirfirst) | +| | Sort Options: namedirfirst,name,size,time (default namedirfirst) | | .Order | The current ordering used. This is changeable via ?order= parameter | | | Order Options: asc,desc (default asc) | | .Query | Currently unused. | diff --git a/cmd/serve/httplib/serve/dir.go b/cmd/serve/httplib/serve/dir.go index 835b9f67c..04cb6572e 100644 --- a/cmd/serve/httplib/serve/dir.go +++ b/cmd/serve/httplib/serve/dir.go @@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) { } } -// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfist/asc +// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory { d.Sort = sortParm d.Order = orderParm diff --git a/cmd/serve/proxy/proxy.go b/cmd/serve/proxy/proxy.go index bda2c7343..ff5730145 100644 --- a/cmd/serve/proxy/proxy.go +++ b/cmd/serve/proxy/proxy.go @@ -27,7 +27,7 @@ var Help = strings.Replace(` If you supply the parameter |--auth-proxy /path/to/program| then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple -JSON based protocl with input on STDIN and output on STDOUT. +JSON based protocol with input on STDIN and output on STDOUT. 
**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used together, if |--auth-proxy| is set the authorized keys option will be diff --git a/cmd/touch/touch_test.go b/cmd/touch/touch_test.go index 35d73bcfa..438bfb358 100644 --- a/cmd/touch/touch_test.go +++ b/cmd/touch/touch_test.go @@ -64,7 +64,7 @@ func TestTouchWithTimestamp(t *testing.T) { checkFile(t, r.Fremote, srcFileName, "") } -func TestTouchWithLognerTimestamp(t *testing.T) { +func TestTouchWithLongerTimestamp(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() diff --git a/docs/content/changelog.md b/docs/content/changelog.md index c6cfb823f..b588bb06a 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -63,7 +63,7 @@ description: "Rclone Changelog" * Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal) * Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal) * Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal) - * Add `core/copmmand` to execute rclone terminal commands. (Chaitanya Bankanhal) + * Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal) * `rclone check` * Add reporting of filenames for same/missing/changed (Nick Craig-Wood) * Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood) @@ -172,7 +172,7 @@ description: "Rclone Changelog" * Google Cloud Storage * Add support for anonymous access (Kai Lüke) * Jottacloud - * Bring back legacy authentification for use with whitelabel versions (buengese) + * Bring back legacy authentication for use with whitelabel versions (buengese) * Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese) * Onedrive * Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood) @@ -322,7 +322,7 @@ all the docs and Edward Barker for helping re-write the front page. * Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood) * Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood) * Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann) - * Add `--cutoff-mode` hard,soft,catious (Shing Kit Chan & Franklyn Tackitt) + * Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt) * Filter flags (eg `--files-from -`) can read from stdin (fishbullet) * Add `--error-on-no-transfer` option (Jon Fautley) * Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood) @@ -605,7 +605,7 @@ all the docs and Edward Barker for helping re-write the front page. * dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood) * dedupe: Add missing modes to help string (Nick Craig-Wood) * operations - * Fix dedupe continuing on errors like insufficientFilePermisson (SezalAgrawal) + * Fix dedupe continuing on errors like insufficientFilePermissions (SezalAgrawal) * Clear accounting before low level retry (Maciej Zimnoch) * Write debug message when hashes could not be checked (Ole Schütt) * Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood) @@ -669,7 +669,7 @@ all the docs and Edward Barker for helping re-write the front page. 
* S3 * Re-implement multipart upload to fix memory issues (Nick Craig-Wood) * Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood) - * Add new region Asia Patific (Hong Kong) (Outvi V) + * Add new region Asia Pacific (Hong Kong) (Outvi V) * Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood) * Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer) * Force path style bucket access to off for AWS deprecation (Nick Craig-Wood) @@ -930,7 +930,7 @@ all the docs and Edward Barker for helping re-write the front page. * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood) * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood) * Mount - * Default `--deamon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood) + * Default `--daemon-timeout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood) * Update docs to show mounting from root OK for bucket based (Nick Craig-Wood) * Remove nonseekable flag from write files (Nick Craig-Wood) * VFS @@ -1093,7 +1093,7 @@ all the docs and Edward Barker for helping re-write the front page. * Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood) * Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood) * build: - * Switch to semvar compliant version tags to be go modules compliant (Nick Craig-Wood) + * Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood) * Update to use go1.12.x for the build (Nick Craig-Wood) * serve dlna: Add connection manager service description to improve compatibility (Dan Walters) * lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood) diff --git a/docs/content/install.md b/docs/content/install.md index 75027571a..2e8e83c74 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -205,7 +205,7 @@ or the latest version (equivalent to the beta) with These will build the binary in `$(go env GOPATH)/bin` (`~/go/bin/rclone` by default) after downloading the source to the go module cache. Note - do **not** use the `-u` flag here. This causes go -to try to update the depencencies that rclone uses and sometimes these +to try to update the dependencies that rclone uses and sometimes these don't work with the current version of rclone. ## Installation with Ansible ## diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md index 07438998b..361b7ce69 100644 --- a/docs/content/jottacloud.md +++ b/docs/content/jottacloud.md @@ -27,8 +27,8 @@ Note that the web interface may refer to this token as a JottaCli token. ### Legacy Setup If you are using one of the whitelabel versions (Elgiganten, Com Hem Cloud) you may not have the option -to generate a CLI token. In this case you'll have to use the legacy authentification. To to this select -yes when the setup asks for legacy authentification and enter your username and password. +to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select +yes when the setup asks for legacy authentication and enter your username and password. The rest of the setup is identical to the default setup. Here is an example of how to make a remote called `remote` with the default setup. First run: @@ -59,7 +59,7 @@ y) Yes n) No y/n> n Remote config -Use legacy authentification?. +Use legacy authentication? This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users. 
y) Yes n) No (default) diff --git a/docs/content/overview.md b/docs/content/overview.md index 88c11376e..b6ff75e62 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -402,7 +402,7 @@ If the server can't do `CleanUp` then `rclone cleanup` will return an error. ‡‡ Note that while Box implements this it has to delete every file -idividually so it will be slower than emptying the trash via the WebUI +individually so it will be slower than emptying the trash via the WebUI ### ListR ### diff --git a/docs/content/qingstor.md b/docs/content/qingstor.md index 87140a951..f35dc15bc 100644 --- a/docs/content/qingstor.md +++ b/docs/content/qingstor.md @@ -259,7 +259,7 @@ Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. -NB if you set this to > 1 then the checksums of multpart uploads +NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though). If you are uploading small numbers of large file over high speed link diff --git a/docs/content/s3.md b/docs/content/s3.md index b7005a52b..965be78fd 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -582,7 +582,7 @@ Region to connect to. - Asia Pacific (Mumbai) - Needs location constraint ap-south-1. - "ap-east-1" - - Asia Patific (Hong Kong) Region + - Asia Pacific (Hong Kong) Region - Needs location constraint ap-east-1. - "sa-east-1" - South America (Sao Paulo) Region @@ -1493,7 +1493,7 @@ All the objects shown will be marked for restore, then rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard It returns a list of status dictionaries with Remote and Status -keys. The Status will be OK if it was successfull or an error message +keys. The Status will be OK if it was successful or an error message if not. [ @@ -1794,7 +1794,7 @@ Choose a number from below, or type in your own value secret_access_key> <> ``` -6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an enpoint address. +6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an endpoint address. ``` Endpoint for IBM COS S3 API. Specify if using an IBM COS On Premise. @@ -1855,7 +1855,7 @@ Choose a number from below, or type in your own value location_constraint>1 ``` -9. Specify a canned ACL. IBM Cloud (Strorage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs. +9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs. ``` Canned ACL used when creating buckets and/or storing objects in S3. 
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl diff --git a/docs/content/tardigrade.md b/docs/content/tardigrade.md index aaeac2b51..6af32d2ac 100644 --- a/docs/content/tardigrade.md +++ b/docs/content/tardigrade.md @@ -65,7 +65,7 @@ d) Delete this remote y/e/d> y ``` -### Setup with API key and passhprase +### Setup with API key and passphrase ``` No remotes found - make a new one diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go index 44ad4d963..fddac4e7e 100644 --- a/fs/accounting/accounting.go +++ b/fs/accounting/accounting.go @@ -450,7 +450,7 @@ func shortenName(in string, size int) string { return in } name := []rune(in) - size-- // don't count elipsis rune + size-- // don't count ellipsis rune suffixLength := size / 2 prefixLength := size - suffixLength suffixStart := len(name) - suffixLength diff --git a/fs/accounting/token_bucket.go b/fs/accounting/token_bucket.go index 13058f68d..e920b2f19 100644 --- a/fs/accounting/token_bucket.go +++ b/fs/accounting/token_bucket.go @@ -99,7 +99,7 @@ func StartTokenTicker() { }() } -// limitBandwith sleeps for the correct amount of time for the passage +// limitBandwidth sleeps for the correct amount of time for the passage // of n bytes according to the current bandwidth limit func limitBandwidth(n int) { tokenBucketMu.Lock() diff --git a/fs/accounting/transfermap.go b/fs/accounting/transfermap.go index a9b9c6700..ef3801b15 100644 --- a/fs/accounting/transfermap.go +++ b/fs/accounting/transfermap.go @@ -91,7 +91,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer { func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string { tm.mu.RLock() defer tm.mu.RUnlock() - strngs := make([]string, 0, len(tm.items)) + stringList := make([]string, 0, len(tm.items)) for _, tr := range tm._sortedSlice() { if exclude != nil { exclude.mu.RLock() @@ -111,9 +111,9 @@ func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string tm.name, ) } - strngs = append(strngs, " * "+out) + stringList = append(stringList, " * "+out) } - return strings.Join(strngs, "\n") + return strings.Join(stringList, "\n") } // progress returns total bytes read as well as the size. diff --git a/fs/bwtimetable.go b/fs/bwtimetable.go index f4f2c0686..df8562bcc 100644 --- a/fs/bwtimetable.go +++ b/fs/bwtimetable.go @@ -80,7 +80,7 @@ func parseWeekday(dayOfWeek string) (int, error) { // Set the bandwidth timetable. func (x *BwTimetable) Set(s string) error { // The timetable is formatted as: - // "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off" + // "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off" // If only a single bandwidth identifier is provided, we assume constant bandwidth. if len(s) == 0 { diff --git a/fs/chunkedreader/chunkedreader.go b/fs/chunkedreader/chunkedreader.go index 7cad4caa9..1e29ea736 100644 --- a/fs/chunkedreader/chunkedreader.go +++ b/fs/chunkedreader/chunkedreader.go @@ -38,7 +38,7 @@ type ChunkedReader struct { // // An initialChunkSize of <= 0 will disable chunked reading. // If maxChunkSize is greater than initialChunkSize, the chunk size will be -// doubled after each chunk read with a maximun of maxChunkSize. +// doubled after each chunk read with a maximum of maxChunkSize. 
// A Seek or RangeSeek will reset the chunk size to it's initial value func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader { if initialChunkSize <= 0 { diff --git a/fs/config.go b/fs/config.go index 7b10dce13..9697dbb07 100644 --- a/fs/config.go +++ b/fs/config.go @@ -62,7 +62,7 @@ type ConfigInfo struct { DeleteMode DeleteMode MaxDelete int64 TrackRenames bool // Track file renames. - TrackRenamesStrategy string // Comma separated list of stratgies used to track renames + TrackRenamesStrategy string // Comma separated list of strategies used to track renames LowLevelRetries int UpdateOlder bool // Skip files that are newer on the destination NoGzip bool // Disable compression diff --git a/fs/config/config_test.go b/fs/config/config_test.go index 69aa5d3b3..7bea45f5e 100644 --- a/fs/config/config_test.go +++ b/fs/config/config_test.go @@ -177,7 +177,7 @@ func TestNewRemoteName(t *testing.T) { assert.Equal(t, "newname", NewRemoteName()) } -func TestCreateUpatePasswordRemote(t *testing.T) { +func TestCreateUpdatePasswordRemote(t *testing.T) { defer testConfigFile(t, "update.conf")() for _, doObscure := range []bool{false, true} { diff --git a/fs/operations/multithread.go b/fs/operations/multithread.go index 85b91bd74..83e77765c 100644 --- a/fs/operations/multithread.go +++ b/fs/operations/multithread.go @@ -73,7 +73,7 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err rc, err := NewReOpen(ctx, mc.src, fs.Config.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1}) if err != nil { - return errors.Wrap(err, "multpart copy: failed to open source") + return errors.Wrap(err, "multipart copy: failed to open source") } defer fs.CheckClose(rc, &err) @@ -89,29 +89,29 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err if nr > 0 { err = mc.acc.AccountRead(nr) if err != nil { - return errors.Wrap(err, "multpart copy: accounting failed") + return errors.Wrap(err, "multipart copy: accounting failed") } nw, ew := mc.wc.WriteAt(buf[0:nr], offset) if nw > 0 { offset += int64(nw) } if ew != nil { - return errors.Wrap(ew, "multpart copy: write failed") + return errors.Wrap(ew, "multipart copy: write failed") } if nr != nw { - return errors.Wrap(io.ErrShortWrite, "multpart copy") + return errors.Wrap(io.ErrShortWrite, "multipart copy") } } if er != nil { if er != io.EOF { - return errors.Wrap(er, "multpart copy: read failed") + return errors.Wrap(er, "multipart copy: read failed") } break } } if offset != end { - return errors.Errorf("multpart copy: wrote %d bytes but expected to write %d", offset-start, end-start) + return errors.Errorf("multipart copy: wrote %d bytes but expected to write %d", offset-start, end-start) } fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v finished", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start)) @@ -163,7 +163,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, // create write file handle mc.wc, err = openWriterAt(gCtx, remote, mc.size) if err != nil { - return nil, errors.Wrap(err, "multpart copy: failed to open destination") + return nil, errors.Wrap(err, "multipart copy: failed to open destination") } fs.Debugf(src, "Starting multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize)) diff --git a/fs/sync/pipe.go b/fs/sync/pipe.go index d618ce23b..d28527e7b 100644 --- a/fs/sync/pipe.go +++ b/fs/sync/pipe.go @@ -32,7 +32,7 @@ type pipe struct { func newPipe(orderBy 
string, stats func(items int, totalSize int64), maxBacklog int) (*pipe, error) { if maxBacklog < 0 { - maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest posititive int + maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest positive int } less, fraction, err := newLess(orderBy) if err != nil { diff --git a/fs/sync/sync.go b/fs/sync/sync.go index 161f13790..3faab01a1 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -40,7 +40,7 @@ type syncCopyMove struct { deletersWg sync.WaitGroup // for delete before go routine deleteFilesCh chan fs.Object // channel to receive deletes if delete before trackRenames bool // set if we should do server side renames - trackRenamesStrategy trackRenamesStrategy // stratgies used for tracking renames + trackRenamesStrategy trackRenamesStrategy // strategies used for tracking renames dstFilesMu sync.Mutex // protect dstFiles dstFiles map[string]fs.Object // dst files, always filled srcFiles map[string]fs.Object // src files, only used if deleteBefore diff --git a/fs/walk/walk.go b/fs/walk/walk.go index efda8bbea..79fcbcd47 100644 --- a/fs/walk/walk.go +++ b/fs/walk/walk.go @@ -205,7 +205,7 @@ func (dm *dirMap) add(dir string, sent bool) { if !sent { return } - // currenSent == false && sent == true so needs overriding + // currentSent == false && sent == true so needs overriding } dm.m[dir] = sent // Add parents in as unsent diff --git a/fstest/run.go b/fstest/run.go index 9394d3785..532fe3a84 100644 --- a/fstest/run.go +++ b/fstest/run.go @@ -254,7 +254,7 @@ func (r *Run) WriteObjectTo(ctx context.Context, f fs.Fs, remote, content string } r.Mkdir(ctx, f) - // caclulate all hashes f supports for content + // calculate all hashes f supports for content hash, err := hash.NewMultiHasherTypes(f.Hashes()) if err != nil { r.Fatalf("Failed to make new multi hasher: %v", err) diff --git a/fstest/test_all/config.go b/fstest/test_all/config.go index 941c7caa5..ab271b58d 100644 --- a/fstest/test_all/config.go +++ b/fstest/test_all/config.go @@ -54,7 +54,7 @@ func (b *Backend) includeTest(t *Test) bool { // MakeRuns creates Run objects the Backend and Test // -// There can be several created, one for each combination of optionl +// There can be several created, one for each combination of optional // flags (eg FastList) func (b *Backend) MakeRuns(t *Test) (runs []*Run) { if !b.includeTest(t) { diff --git a/fstest/testserver/init.d/TestWebdavOwncloud b/fstest/testserver/init.d/TestWebdavOwncloud index e6b4fbdf7..c12cb2259 100755 --- a/fstest/testserver/init.d/TestWebdavOwncloud +++ b/fstest/testserver/init.d/TestWebdavOwncloud @@ -12,7 +12,7 @@ start() { docker run --rm -d --name $NAME \ -e "OWNCLOUD_DOMAIN=${OWNCLOUD_DOMAIN}" \ -e "OWNCLOUD_DB_TYPE=sqlite" \ - -e "OWNCLOUD_DB_NAME=oowncloud.db" \ + -e "OWNCLOUD_DB_NAME=owncloud.db" \ -e "OWNCLOUD_ADMIN_USERNAME=$USER" \ -e "OWNCLOUD_ADMIN_PASSWORD=$PASS" \ -e "OWNCLOUD_MYSQL_UTF8MB4=true" \ diff --git a/lib/encoder/encoder.go b/lib/encoder/encoder.go index f69878367..2724af73b 100644 --- a/lib/encoder/encoder.go +++ b/lib/encoder/encoder.go @@ -195,7 +195,7 @@ func (mask *MultiEncoder) Set(in string) error { return nil } -// Type returns a textual type of the MultiEncoder to satsify the pflag.Value interface +// Type returns a textual type of the MultiEncoder to satisfy the pflag.Value interface func (mask MultiEncoder) Type() string { return "Encoding" } diff --git a/lib/random/random.go b/lib/random/random.go index 9d95029c8..7da76f9e2 100644 --- a/lib/random/random.go +++ b/lib/random/random.go @@ -32,7 +32,7 
 // memorable. The password is composed of printable ASCII characters
 // from the base64 alphabet.
 //
-// Requres password strength in bits.
+// Requires password strength in bits.
 // 64 is just about memorable
 // 128 is secure
 func Password(bits int) (password string, err error) {
diff --git a/lib/ranges/ranges.go b/lib/ranges/ranges.go
index 8fac3e39e..f72be5adf 100644
--- a/lib/ranges/ranges.go
+++ b/lib/ranges/ranges.go
@@ -102,7 +102,7 @@ func (rs *Ranges) coalesce(i int) {
 		endChop = j + 1
 	}
 	if endChop > startChop {
-		// chop the uneeded ranges out
+		// chop the unneeded ranges out
 		copy(ranges[startChop:], ranges[endChop:])
 		*rs = ranges[:len(ranges)-endChop+startChop]
 	}
diff --git a/lib/structs/structs.go b/lib/structs/structs.go
index b1a781bd8..d1cb676c9 100644
--- a/lib/structs/structs.go
+++ b/lib/structs/structs.go
@@ -15,7 +15,7 @@ import (
 // and b will be set.
 //
 // This is useful for copying between almost identical structures that
-// are requently present in auto generated code for cloud storage
+// are frequently present in auto generated code for cloud storage
 // interfaces.
 func SetFrom(a, b interface{}) {
 	ta := reflect.TypeOf(a).Elem()
diff --git a/lib/structs/structs_test.go b/lib/structs/structs_test.go
index 8ac8f12e6..9dffecf3f 100644
--- a/lib/structs/structs_test.go
+++ b/lib/structs/structs_test.go
@@ -57,7 +57,7 @@ func TestSetFrom(t *testing.T) {
 		Matching:      "a",
 		OnlyA:         "onlyA",
 		MatchingInt:   1,
-		DifferentType: "suprise",
+		DifferentType: "surprise",
 	}
 
 	b := bType{
@@ -75,7 +75,7 @@ func TestSetFrom(t *testing.T) {
 		Matching:      "b",
 		OnlyA:         "onlyA",
 		MatchingInt:   2,
-		DifferentType: "suprise",
+		DifferentType: "surprise",
 	}, a)
 
 	assert.Equal(t, bBefore, b)
@@ -86,7 +86,7 @@ func TestSetFromReversed(t *testing.T) {
 		Matching:      "a",
 		OnlyA:         "onlyA",
 		MatchingInt:   1,
-		DifferentType: "suprise",
+		DifferentType: "surprise",
 	}
 
 	aBefore := a
diff --git a/notes.txt b/notes.txt
index 479f9d3cf..d467441e7 100644
--- a/notes.txt
+++ b/notes.txt
@@ -12,7 +12,7 @@ Todo
   * Might be quicker to check md5sums first? for swift <-> swift
     certainly, and maybe for small files
   * FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
     creation in common code? Or try for as much as possible?
-  * FIXME Account all the transactons (ls etc) using a different
+  * FIXME Account all the transactions (ls etc) using a different
     Roundtripper wrapper which wraps the transactions?
 
 Ideas
diff --git a/vfs/dir.go b/vfs/dir.go
index 972f75b23..7c7207d19 100644
--- a/vfs/dir.go
+++ b/vfs/dir.go
@@ -121,7 +121,7 @@ func (d *Dir) Inode() uint64 {
 	return d.inode
 }
 
-// Node returns the Node assocuated with this - satisfies Noder interface
+// Node returns the Node associated with this - satisfies Noder interface
 func (d *Dir) Node() Node {
 	return d
 }
@@ -254,7 +254,7 @@ func (d *Dir) countActiveWriters() (writers int) {
 }
 
 // age returns the duration since the last time the directory contents
-// was read and the content is cosidered stale. age will be 0 and
+// was read and the content is considered stale. age will be 0 and
 // stale true if the last read time is empty.
 // age must be called with d.mu held.
 func (d *Dir) _age(when time.Time) (age time.Duration, stale bool) {
@@ -528,7 +528,7 @@ func (d *Dir) stat(leaf string) (Node, error) {
 				// duplicate case insensitive match is an error
 				return nil, errors.Errorf("duplicate filename %q detected with --vfs-case-insensitive set", leaf)
 			}
-			// found a case insenstive match
+			// found a case insensitive match
 			ok = true
 			item = node
 		}
diff --git a/vfs/dir_handle.go b/vfs/dir_handle.go
index b60065b2b..d2ef0f9e9 100644
--- a/vfs/dir_handle.go
+++ b/vfs/dir_handle.go
@@ -35,7 +35,7 @@ func (fh *DirHandle) Stat() (fi os.FileInfo, err error) {
 	return fh.d, nil
 }
 
-// Node returns the Node assocuated with this - satisfies Noder interface
+// Node returns the Node associated with this - satisfies Noder interface
 func (fh *DirHandle) Node() Node {
 	return fh.d
 }
diff --git a/vfs/file.go b/vfs/file.go
index c3e58311c..75f388868 100644
--- a/vfs/file.go
+++ b/vfs/file.go
@@ -135,7 +135,7 @@ func (f *File) Inode() uint64 {
 	return f.inode
 }
 
-// Node returns the Node assocuated with this - satisfies Noder interface
+// Node returns the Node associated with this - satisfies Noder interface
 func (f *File) Node() Node {
 	return f
 }
@@ -499,7 +499,7 @@ func (f *File) openWrite(flags int) (fh *WriteFileHandle, err error) {
 	return fh, nil
 }
 
-// openRW open the file for read and write using a temporay file
+// openRW open the file for read and write using a temporary file
 //
 // It uses the open flags passed in.
 func (f *File) openRW(flags int) (fh *RWFileHandle, err error) {
@@ -611,7 +611,7 @@ func (f *File) Fs() fs.Fs {
 //   O_CREATE create a new file if none exists.
 //   O_EXCL   used with O_CREATE, file must not exist
 //   O_SYNC   open for synchronous I/O.
-//   O_TRUNC  if possible, truncate file when opene
+//   O_TRUNC  if possible, truncate file when opened
 //
 // We ignore O_SYNC and O_EXCL
 func (f *File) Open(flags int) (fd Handle, err error) {
diff --git a/vfs/help.go b/vfs/help.go
index 7e34e6038..523ff22d3 100644
--- a/vfs/help.go
+++ b/vfs/help.go
@@ -149,7 +149,7 @@ In this mode all reads and writes are buffered to and from disk. When
 data is read from the remote this is buffered to disk as well.
 
 In this mode the files in the cache will be sparse files and rclone
-will keep track of which bits of the files it has dowloaded.
+will keep track of which bits of the files it has downloaded.
 
 So if an application only reads the starts of each file, then rclone
 will only buffer the start of the file. These files will appear to be
diff --git a/vfs/read.go b/vfs/read.go
index dcbae07d7..45437f5b4 100644
--- a/vfs/read.go
+++ b/vfs/read.go
@@ -98,7 +98,7 @@ func (fh *ReadFileHandle) String() string {
 	return fh.file.String() + " (r)"
 }
 
-// Node returns the Node assocuated with this - satisfies Noder interface
+// Node returns the Node associated with this - satisfies Noder interface
 func (fh *ReadFileHandle) Node() Node {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
diff --git a/vfs/read_write.go b/vfs/read_write.go
index 7fa04a7d6..03d990c3a 100644
--- a/vfs/read_write.go
+++ b/vfs/read_write.go
@@ -118,7 +118,7 @@ func (fh *RWFileHandle) String() string {
 	return fh.file.String() + " (rw)"
 }
 
-// Node returns the Node assocuated with this - satisfies Noder interface
+// Node returns the Node associated with this - satisfies Noder interface
 func (fh *RWFileHandle) Node() Node {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
diff --git a/vfs/vfs.go b/vfs/vfs.go
index d2d1d9d2b..0b1151b2f 100644
--- a/vfs/vfs.go
+++ b/vfs/vfs.go
@@ -108,7 +108,7 @@ type OsFiler interface {
 	WriteString(s string) (n int, err error)
 }
 
-// Handle is the interface statisified by open files or directories.
+// Handle is the interface satisfied by open files or directories.
 // It is the methods on *os.File, plus a few more useful for FUSE
 // filingsystems. Not all of them are supported.
 type Handle interface {
diff --git a/vfs/vfs_test.go b/vfs/vfs_test.go
index afef12de4..6a64f5b1b 100644
--- a/vfs/vfs_test.go
+++ b/vfs/vfs_test.go
@@ -29,7 +29,7 @@ var (
 // Constants uses in the tests
 const (
 	writeBackDelay      = 100 * time.Millisecond // A short writeback delay for testing
-	waitForWritersDelay = 10 * time.Second       // time to wait for exiting writiers
+	waitForWritersDelay = 10 * time.Second       // time to wait for existing writers
 )
 
 // TestMain drives the tests
diff --git a/vfs/vfscache/cache.go b/vfs/vfscache/cache.go
index eb181429a..5a5dd8fa6 100644
--- a/vfs/vfscache/cache.go
+++ b/vfs/vfscache/cache.go
@@ -56,7 +56,7 @@ type Cache struct {
 	used          int64         // total size of files in the cache
 	outOfSpace    bool          // out of space
 	cleanerKicked bool          // some thread kicked the cleaner upon out of space
-	kickerMu      sync.Mutex    // mutex for clearnerKicked
+	kickerMu      sync.Mutex    // mutex for cleanerKicked
 	kick          chan struct{} // channel for kicking clear to start
 }
 
@@ -69,7 +69,7 @@ type Cache struct {
 // go into the directory tree.
 type AddVirtualFn func(remote string, size int64, isDir bool) error
 
-// New creates a new cache heirachy for fremote
+// New creates a new cache hierarchy for fremote
 //
 // This starts background goroutines which can be cancelled with the
 // context passed in.
@@ -336,7 +336,7 @@ func (c *Cache) Rename(name string, newName string, newObj fs.Object) (err error
 // Remove should be called if name is deleted
 //
 // This returns true if the file was in the transfer queue so may not
-// have completedly uploaded yet.
+// have completely uploaded yet.
 func (c *Cache) Remove(name string) (wasWriting bool) {
 	name = clean(name)
 	c.mu.Lock()
@@ -461,7 +461,7 @@ func (c *Cache) removeNotInUse(item *Item, maxAge time.Duration, emptyOnly bool)
 
 // Retry failed resets during purgeClean()
 func (c *Cache) retryFailedResets() {
-	// Some items may have failed to reset becasue there was not enough space
+	// Some items may have failed to reset because there was not enough space
 	// for saving the cache item's metadata. Redo the Reset()'s here now that
 	// we may have some available space.
 	if len(c.errItems) != 0 {
@@ -625,7 +625,7 @@ func (c *Cache) clean(removeCleanFiles bool) {
 	c.purgeOverQuota(int64(c.opt.CacheMaxSize))
 
 	// removeCleanFiles indicates that we got ENOSPC error
-	// We remove cache files that are not dirty if we are still avove the max cache size
+	// We remove cache files that are not dirty if we are still above the max cache size
 	if removeCleanFiles {
 		c.purgeClean(int64(c.opt.CacheMaxSize))
 		c.retryFailedResets()
diff --git a/vfs/vfscache/cache_test.go b/vfs/vfscache/cache_test.go
index e2b3f6d4b..1052a0f93 100644
--- a/vfs/vfscache/cache_test.go
+++ b/vfs/vfscache/cache_test.go
@@ -430,24 +430,24 @@ func TestCachePurgeOverQuota(t *testing.T) {
 func TestCachePurgeClean(t *testing.T) {
 	r, c, cleanup := newItemTestCache(t)
 	defer cleanup()
-	contents, obj, patato1 := newFile(t, r, c, "existing")
+	contents, obj, potato1 := newFile(t, r, c, "existing")
 	_ = contents
 
 	// Open the object to create metadata for it
-	require.NoError(t, patato1.Open(obj))
-	require.NoError(t, patato1.Open(obj))
+	require.NoError(t, potato1.Open(obj))
+	require.NoError(t, potato1.Open(obj))
 
-	size, err := patato1.GetSize()
+	size, err := potato1.GetSize()
 	require.NoError(t, err)
 	assert.Equal(t, int64(100), size)
 
 	// Read something to instantiate the cache file
 	buf := make([]byte, 10)
-	_, err = patato1.ReadAt(buf, 10)
+	_, err = potato1.ReadAt(buf, 10)
 	require.NoError(t, err)
 
 	// Test cache file present
-	_, err = os.Stat(patato1.c.toOSPath(patato1.name))
+	_, err = os.Stat(potato1.c.toOSPath(potato1.name))
 	require.NoError(t, err)
 
 	// Add some potatos
@@ -476,8 +476,8 @@ func TestCachePurgeClean(t *testing.T) {
 	}, itemSpaceAsString(c))
 	assert.Equal(t, int64(6), c.used)
 
-	require.NoError(t, patato1.Close(nil))
-	require.NoError(t, patato1.Close(nil))
+	require.NoError(t, potato1.Close(nil))
+	require.NoError(t, potato1.Close(nil))
 	require.NoError(t, potato3.Close(nil))
 
 	// Remove all files now. The are all not in use.
diff --git a/vfs/vfscache/downloaders/downloaders.go b/vfs/vfscache/downloaders/downloaders.go
index 6ed57f6d1..399fcd3dd 100644
--- a/vfs/vfscache/downloaders/downloaders.go
+++ b/vfs/vfscache/downloaders/downloaders.go
@@ -298,7 +298,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
 	r = dls.item.FindMissing(r)
 
 	// If the range is entirely present then we only need to start a
-	// dowloader if the window isn't full.
+	// downloader if the window isn't full.
 	startNew := true
 	if r.IsEmpty() {
 		// Make a new range which includes the window
@@ -561,7 +561,7 @@ func (dl *downloader) close(inErr error) (err error) {
 	return nil
 }
 
-// closed returns true if the downloader has been closed alread
+// closed returns true if the downloader has been closed already
 func (dl *downloader) closed() bool {
 	dl.mu.Lock()
 	defer dl.mu.Unlock()
diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go
index 22ca1801d..9163e8ce9 100644
--- a/vfs/vfscache/item.go
+++ b/vfs/vfscache/item.go
@@ -246,7 +246,7 @@ func (item *Item) _truncate(size int64) (err error) {
 	// Use open handle if available
 	fd := item.fd
 	if fd == nil {
-		// If the metadata says we have some blockes cached then the
+		// If the metadata says we have some blocks cached then the
 		// file should exist, so open without O_CREATE
 		oFlags := os.O_WRONLY
 		if item.info.Rs.Size() == 0 {
@@ -647,7 +647,7 @@ func (item *Item) Close(storeFn StoreFn) (err error) {
 	// If the file is dirty ensure any segments not transferred
 	// are brought in first.
 	//
-	// FIXME It would be nice to do this asynchronously howeve it
+	// FIXME It would be nice to do this asynchronously however it
 	// would require keeping the downloaders alive after the item
 	// has been closed
 	if item.info.Dirty && item.o != nil {
@@ -841,7 +841,7 @@ func (item *Item) _removeMeta(reason string) {
 // remove the cached file and empty the metadata
 //
 // This returns true if the file was in the transfer queue so may not
-// have completedly uploaded yet.
+// have completely uploaded yet.
 //
 // call with lock held
 func (item *Item) _remove(reason string) (wasWriting bool) {
@@ -859,7 +859,7 @@ func (item *Item) _remove(reason string) (wasWriting bool) {
 // remove the cached file and empty the metadata
 //
 // This returns true if the file was in the transfer queue so may not
-// have completedly uploaded yet.
+// have completely uploaded yet.
 func (item *Item) remove(reason string) (wasWriting bool) {
 	item.mu.Lock()
 	defer item.mu.Unlock()
diff --git a/vfs/vfscache/item_test.go b/vfs/vfscache/item_test.go
index 933911ace..59561708b 100644
--- a/vfs/vfscache/item_test.go
+++ b/vfs/vfscache/item_test.go
@@ -548,7 +548,7 @@ func TestItemReadWrite(t *testing.T) {
 		assert.False(t, item.remove(fileName))
 	})
 
-	// Read it back randomly concurently
+	// Read it back randomly concurrently
 	t.Run("RandomConcurrent", func(t *testing.T) {
 		require.NoError(t, item.Open(obj))
 		assert.False(t, item.present())
diff --git a/vfs/vfscache/writeback/writeback.go b/vfs/vfscache/writeback/writeback.go
index 59918e953..f4c367a58 100644
--- a/vfs/vfscache/writeback/writeback.go
+++ b/vfs/vfscache/writeback/writeback.go
@@ -15,7 +15,7 @@ import (
 )
 
 const (
-	maxUploadDelay = 5 * time.Minute // max delay betwen upload attempts
+	maxUploadDelay = 5 * time.Minute // max delay between upload attempts
 )
 
 // PutFn is the interface that item provides to store the data
@@ -32,7 +32,7 @@ type WriteBack struct {
 	lookup  map[Handle]*writeBackItem // for getting a *writeBackItem from a Handle - writeBackItems are in here until cancelled
 	opt     *vfscommon.Options        // VFS options
 	timer   *time.Timer               // next scheduled time for the uploader
-	expiry  time.Time                 // time the next item exires or IsZero
+	expiry  time.Time                 // time the next item expires or IsZero
 	uploads int                       // number of uploads in progress
 
 	// read and written with atomic
diff --git a/vfs/vfstest/dir.go b/vfs/vfstest/dir.go
index d182cd24c..8c6273a1c 100644
--- a/vfs/vfstest/dir.go
+++ b/vfs/vfstest/dir.go
@@ -156,7 +156,7 @@ func TestDirModTime(t *testing.T) {
 	run.rmdir(t, "dir")
 }
 
-// TestDirCacheFlush tests fluching the dir cache
+// TestDirCacheFlush tests flushing the dir cache
 func TestDirCacheFlush(t *testing.T) {
 	run.skipIfNoFUSE(t)
diff --git a/vfs/write.go b/vfs/write.go
index bc8c01677..3b2c98905 100644
--- a/vfs/write.go
+++ b/vfs/write.go
@@ -97,7 +97,7 @@ func (fh *WriteFileHandle) String() string {
 	return fh.file.String() + " (w)"
 }
 
-// Node returns the Node assocuated with this - satisfies Noder interface
+// Node returns the Node associated with this - satisfies Noder interface
 func (fh *WriteFileHandle) Node() Node {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
@@ -122,7 +122,7 @@ func (fh *WriteFileHandle) WriteAt(p []byte, off int64) (n int, err error) {
 	return fh.writeAt(p, off)
 }
 
-// Implementatino of WriteAt - call with lock held
+// Implementation of WriteAt - call with lock held
 func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
 	// defer log.Trace(fh.remote, "len=%d off=%d", len(p), off)("n=%d, fh.off=%d, err=%v", &n, &fh.offset, &err)
 	if fh.closed {