Use binary prefixes for size and rate units
Adds support for the additional size input suffixes Mi and MiB, treated as equivalent to M. Extends binary suffix output with the letter i, e.g. Ki and Mi. Centralizes creation of bit/byte unit strings.
This commit is contained in:
parent 2ec0c8d45f
commit 2925e1384c
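The fs.SizeSuffix parsing and formatting change itself is not among the hunks quoted below, so the following is only a rough sketch of the behaviour the commit message describes: size inputs may now carry an `i` (`Mi`, `MiB`), treated the same as the plain binary suffix `M`, and sizes are printed with binary suffixes such as `Ki` and `Mi`. All names here are illustrative, not rclone's actual API.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// suffixShift maps a size suffix to its power-of-two shift. Every suffix
// is a binary multiple, so "M", "Mi" and "MiB" are all 2^20.
// Illustrative only - this is not rclone's actual fs.SizeSuffix code.
var suffixShift = map[string]uint{
	"K": 10, "Ki": 10, "KiB": 10,
	"M": 20, "Mi": 20, "MiB": 20,
	"G": 30, "Gi": 30, "GiB": 30,
	"T": 40, "Ti": 40, "TiB": 40,
	"P": 50, "Pi": 50, "PiB": 50,
}

// parseSize turns "10M", "10Mi" or "10MiB" into 10 << 20 bytes.
func parseSize(s string) (int64, error) {
	best := ""
	for suffix := range suffixShift {
		if strings.HasSuffix(s, suffix) && len(suffix) > len(best) {
			best = suffix
		}
	}
	n, err := strconv.ParseFloat(strings.TrimSuffix(s, best), 64)
	if err != nil {
		return 0, err
	}
	return int64(n * float64(int64(1)<<suffixShift[best])), nil
}

// formatSize prints a byte count using the new binary output suffixes,
// e.g. 10485760 -> "10Mi".
func formatSize(b int64) string {
	suffixes := []string{"", "Ki", "Mi", "Gi", "Ti", "Pi"}
	v, i := float64(b), 0
	for v >= 1024 && i < len(suffixes)-1 {
		v /= 1024
		i++
	}
	return strconv.FormatFloat(v, 'f', -1, 64) + suffixes[i]
}

func main() {
	for _, in := range []string{"10M", "10Mi", "10MiB"} {
		n, _ := parseSize(in)
		fmt.Printf("%-6s -> %d bytes (%s)\n", in, n, formatSize(n))
	}
}
```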
@@ -83,16 +83,16 @@ func init() {
 Advanced: true,
 }, {
 Name: "upload_wait_per_gb",
-Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
+Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.

 Sometimes Amazon Drive gives an error when a file has been fully
 uploaded but the file appears anyway after a little while. This
-happens sometimes for files over 1GB in size and nearly every time for
-files bigger than 10GB. This parameter controls the time rclone waits
+happens sometimes for files over 1 GiB in size and nearly every time for
+files bigger than 10 GiB. This parameter controls the time rclone waits
 for the file to appear.

-The default value for this parameter is 3 minutes per GB, so by
-default it will wait 3 minutes for every GB uploaded to see if the
+The default value for this parameter is 3 minutes per GiB, so by
+default it will wait 3 minutes for every GiB uploaded to see if the
 file appears.

 You can disable this feature by setting it to 0. This may cause

@@ -112,7 +112,7 @@ in this situation.`,

 Files this size or more will be downloaded via their "tempLink". This
 is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10GB. The default for this is 9GB which
+of files bigger than about 10 GiB. The default for this is 9 GiB which
 shouldn't need to be changed.

 To download files above this threshold, rclone requests a "tempLink"
@@ -47,8 +47,8 @@ const (
 timeFormatIn = time.RFC3339
 timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
 storageDefaultBaseURL = "blob.core.windows.net"
-defaultChunkSize = 4 * fs.MebiByte
-maxChunkSize = 100 * fs.MebiByte
+defaultChunkSize = 4 * fs.Mebi
+maxChunkSize = 100 * fs.Mebi
 uploadConcurrency = 4
 defaultAccessTier = azblob.AccessTierNone
 maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
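The hunks in this commit swap `fs.KibiByte`/`fs.MebiByte`/`fs.GibiByte` for `fs.Kibi`/`fs.Mebi`/`fs.Gibi`, and `fs.Byte` for `fs.SizeSuffixBase`. The definitions themselves are not shown in this diff; presumably they are `fs.SizeSuffix` constants for the binary multiples, along the lines of this sketch:

```go
package main

import "fmt"

// SizeSuffix mirrors the idea of fs.SizeSuffix; the constants are a
// sketch of what the renamed identifiers presumably stand for. The real
// definitions live in rclone's fs package and are not shown in this diff.
type SizeSuffix int64

const (
	SizeSuffixBase SizeSuffix = 1 << (iota * 10) // 1 byte
	Kibi                                         // 1 << 10 bytes (1 KiB)
	Mebi                                         // 1 << 20 bytes (1 MiB)
	Gibi                                         // 1 << 30 bytes (1 GiB)
	Tebi                                         // 1 << 40 bytes (1 TiB)
	Pebi                                         // 1 << 50 bytes (1 PiB)
)

func main() {
	// 4 * Mebi is the azureblob defaultChunkSize above: 4194304 bytes,
	// the same value 4 * fs.MebiByte had before the rename.
	fmt.Println(4*Mebi, 100*Mebi) // 4194304 104857600
}
```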
@@ -129,11 +129,11 @@ msi_client_id, or msi_mi_res_id parameters.`,
 Advanced: true,
 }, {
 Name: "upload_cutoff",
-Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
+Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
 Advanced: true,
 }, {
 Name: "chunk_size",
-Help: `Upload chunk size (<= 100MB).
+Help: `Upload chunk size (<= 100 MiB).

 Note that this is stored in memory and there may be up to
 "--transfers" chunks stored at once in memory.`,

@@ -404,7 +404,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-const minChunkSize = fs.Byte
+const minChunkSize = fs.SizeSuffixBase
 if cs < minChunkSize {
 return errors.Errorf("%s is less than %s", cs, minChunkSize)
 }
@@ -54,10 +54,10 @@ const (
 decayConstant = 1 // bigger for slower decay, exponential
 maxParts = 10000
 maxVersions = 100 // maximum number of versions we search in --b2-versions mode
-minChunkSize = 5 * fs.MebiByte
-defaultChunkSize = 96 * fs.MebiByte
-defaultUploadCutoff = 200 * fs.MebiByte
-largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
+minChunkSize = 5 * fs.Mebi
+defaultChunkSize = 96 * fs.Mebi
+defaultUploadCutoff = 200 * fs.Mebi
+largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
 memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
 memoryPoolUseMmap = false
 )

@@ -116,7 +116,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration

 Files above this size will be uploaded in chunks of "--b2-chunk-size".

-This value should be set no larger than 4.657GiB (== 5GB).`,
+This value should be set no larger than 4.657 GiB (== 5 GB).`,
 Default: defaultUploadCutoff,
 Advanced: true,
 }, {

@@ -126,7 +126,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.

-The minimum is 0 and the maximum is 4.6GB.`,
+The minimum is 0 and the maximum is 4.6 GiB.`,
 Default: largeFileCopyCutoff,
 Advanced: true,
 }, {
@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 //
 // The number of bytes in the file being uploaded. Note that
 // this header is required; you cannot leave it out and just
 // use chunked encoding. The minimum size of every part but
-// the last one is 100MB.
+// the last one is 100 MB (100,000,000 bytes)
 //
 // X-Bz-Content-Sha1
 //
 // The SHA1 checksum of the this part of the file. B2 will
 // check this when the part is uploaded, to make sure that the
 // data arrived correctly. The same SHA1 checksum must be
 // passed to b2_finish_large_file.
 opts := rest.Opts{
 Method: "POST",
@@ -126,7 +126,7 @@ func init() {
 }},
 }, {
 Name: "upload_cutoff",
-Help: "Cutoff for switching to multipart upload (>= 50MB).",
+Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
 Default: fs.SizeSuffix(defaultUploadCutoff),
 Advanced: true,
 }, {

@@ -1286,7 +1286,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

 // upload does a single non-multipart upload
 //
-// This is recommended for less than 50 MB of content
+// This is recommended for less than 50 MiB of content
 func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
 upload := api.UploadFile{
 Name: o.fs.opt.Enc.FromStandardName(leaf),
backend/cache/cache.go (vendored, 14 changed lines)
@@ -98,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
 will need to be cleared or unexpected EOF errors will occur.`,
 Default: DefCacheChunkSize,
 Examples: []fs.OptionExample{{
-Value: "1m",
-Help: "1MB",
+Value: "1M",
+Help: "1 MiB",
 }, {
 Value: "5M",
-Help: "5 MB",
+Help: "5 MiB",
 }, {
 Value: "10M",
-Help: "10 MB",
+Help: "10 MiB",
 }},
 }, {
 Name: "info_age",

@@ -132,13 +132,13 @@ oldest chunks until it goes under this value.`,
 Default: DefCacheTotalChunkSize,
 Examples: []fs.OptionExample{{
 Value: "500M",
-Help: "500 MB",
+Help: "500 MiB",
 }, {
 Value: "1G",
-Help: "1 GB",
+Help: "1 GiB",
 }, {
 Value: "10G",
-Help: "10 GB",
+Help: "10 GiB",
 }},
 }, {
 Name: "db_path",
@@ -155,7 +155,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 }, {
 Name: "chunk_size",
 Advanced: false,
-Default: fs.SizeSuffix(2147483648), // 2GB
+Default: fs.SizeSuffix(2147483648), // 2 GiB
 Help: `Files larger than chunk size will be split in chunks.`,
 }, {
 Name: "name_format",

@@ -1448,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 c.accountBytes(size)
 return nil
 }
-const bufLen = 1048576 // 1MB
+const bufLen = 1048576 // 1 MiB
 buf := make([]byte, bufLen)
 for size > 0 {
 n := size
@@ -33,7 +33,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
 ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
 Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
-Size: int64(kilobytes) * int64(fs.KibiByte),
+Size: int64(kilobytes) * int64(fs.Kibi),
 })
 })
 }
@@ -36,7 +36,7 @@ import (
 // Globals
 const (
 initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
-maxChunkSize = 8388608 // at 256KB and 8 MB.
+maxChunkSize = 8388608 // at 256 KiB and 8 MiB.

 bufferSize = 8388608
 heuristicBytes = 1048576
@@ -68,8 +68,8 @@ const (
 defaultScope = "drive"
 // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
 // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
-minChunkSize = 256 * fs.KibiByte
-defaultChunkSize = 8 * fs.MebiByte
+minChunkSize = 256 * fs.Kibi
+defaultChunkSize = 8 * fs.Mebi
 partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
 listRGrouping = 50 // number of IDs to search at once when using ListR
 listRInputBuffer = 1000 // size of input buffer when using ListR

@@ -467,7 +467,7 @@ See: https://github.com/rclone/rclone/issues/3631
 Default: false,
 Help: `Make upload limit errors be fatal

-At the time of writing it is only possible to upload 750GB of data to
+At the time of writing it is only possible to upload 750 GiB of data to
 Google Drive a day (this is an undocumented limit). When this limit is
 reached Google Drive produces a slightly different error message. When
 this flag is set it causes these errors to be fatal. These will stop

@@ -484,7 +484,7 @@ See: https://github.com/rclone/rclone/issues/3857
 Default: false,
 Help: `Make download limit errors be fatal

-At the time of writing it is only possible to download 10TB of data from
+At the time of writing it is only possible to download 10 TiB of data from
 Google Drive a day (this is an undocumented limit). When this limit is
 reached Google Drive produces a slightly different error message. When
 this flag is set it causes these errors to be fatal. These will stop
@@ -65,9 +65,9 @@ const (
 // Upload chunk size - setting too small makes uploads slow.
 // Chunks are buffered into memory for retries.
 //
-// Speed vs chunk size uploading a 1 GB file on 2017-11-22
+// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
 //
-// Chunk Size MB, Speed Mbyte/s, % of max
+// Chunk Size MiB, Speed MiByte/s, % of max
 // 1 1.364 11%
 // 2 2.443 19%
 // 4 4.288 33%

@@ -82,11 +82,11 @@ const (
 // 96 12.302 95%
 // 128 12.945 100%
 //
-// Choose 48MB which is 91% of Maximum speed. rclone by
-// default does 4 transfers so this should use 4*48MB = 192MB
+// Choose 48 MiB which is 91% of Maximum speed. rclone by
+// default does 4 transfers so this should use 4*48 MiB = 192 MiB
 // by default.
-defaultChunkSize = 48 * fs.MebiByte
-maxChunkSize = 150 * fs.MebiByte
+defaultChunkSize = 48 * fs.Mebi
+maxChunkSize = 150 * fs.Mebi
 // Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
 maxFileNameLength = 255
 )

@@ -164,7 +164,7 @@ Any files larger than this will be uploaded in chunks of this size.

 Note that chunks are buffered in memory (one at a time) so rclone can
 deal with retries. Setting this larger will increase the speed
-slightly (at most 10%% for 128MB in tests) at the cost of using more
+slightly (at most 10%% for 128 MiB in tests) at the cost of using more
 memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 Default: defaultChunkSize,
 Advanced: true,

@@ -325,7 +325,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-const minChunkSize = fs.Byte
+const minChunkSize = fs.SizeSuffixBase
 if cs < minChunkSize {
 return errors.Errorf("%s is less than %s", cs, minChunkSize)
 }
@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 return nil
 }

-// About reports space usage (with a MB precision)
+// About reports space usage (with a MiB precision)
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 mount, err := f.client.MountsDetails(f.mountID)
 if err != nil {
@@ -52,8 +52,8 @@ const (
 driveTypePersonal = "personal"
 driveTypeBusiness = "business"
 driveTypeSharepoint = "documentLibrary"
-defaultChunkSize = 10 * fs.MebiByte
-chunkSizeMultiple = 320 * fs.KibiByte
+defaultChunkSize = 10 * fs.Mebi
+chunkSizeMultiple = 320 * fs.Kibi

 regionGlobal = "global"
 regionUS = "us"

@@ -696,7 +696,7 @@ func errorHandler(resp *http.Response) error {
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-const minChunkSize = fs.Byte
+const minChunkSize = fs.SizeSuffixBase
 if cs%chunkSizeMultiple != 0 {
 return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
 }

@@ -1885,11 +1885,11 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 return info, nil
 }

-// Update the content of a remote file within 4MB size in one single request
+// Update the content of a remote file within 4 MiB size in one single request
 // This function will set modtime after uploading, which will create a new version for the remote file
 func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
 if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
-return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
+return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
 }

 fs.Debugf(o, "Starting singlepart upload")
@@ -88,7 +88,7 @@ func init() {

 Note that these chunks are buffered in memory so increasing them will
 increase memory use.`,
-Default: 10 * fs.MebiByte,
+Default: 10 * fs.Mebi,
 Advanced: true,
 }},
 })
@@ -35,7 +35,7 @@ const (
 minSleep = 10 * time.Millisecond
 maxSleep = 2 * time.Second
 decayConstant = 2 // bigger for slower decay, exponential
-defaultChunkSize = 48 * fs.MebiByte
+defaultChunkSize = 48 * fs.Mebi
 )

 var (
@@ -80,7 +80,7 @@ func init() {
 Help: `Cutoff for switching to chunked upload

 Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
 Default: defaultUploadCutoff,
 Advanced: true,
 }, {
@@ -1016,7 +1016,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 Help: `Cutoff for switching to chunked upload

 Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
 Default: defaultUploadCutoff,
 Advanced: true,
 }, {

@@ -1038,9 +1038,9 @@ Rclone will automatically increase the chunk size when uploading a
 large file of known size to stay below the 10,000 chunks limit.

 Files of unknown size are uploaded with the configured
-chunk_size. Since the default chunk size is 5MB and there can be at
+chunk_size. Since the default chunk size is 5 MiB and there can be at
 most 10,000 chunks, this means that by default the maximum size of
-a file you can stream upload is 48GB. If you wish to stream upload
+a file you can stream upload is 48 GiB. If you wish to stream upload
 larger files then you will need to increase chunk_size.`,
 Default: minChunkSize,
 Advanced: true,
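The help text above says that with the default 5 MiB chunk size and the 10,000-part limit a streamed upload tops out around 48 GiB. A quick standalone check of that arithmetic (illustrative, not rclone code):

```go
package main

import "fmt"

func main() {
	const chunkSize = 5 << 20 // the default 5 MiB chunk size
	const maxParts = 10000    // maximum number of parts per multipart upload
	limit := int64(chunkSize) * maxParts
	fmt.Println(limit)                               // 52428800000 bytes
	fmt.Printf("%.1f GiB\n", float64(limit)/(1<<30)) // ~48.8 GiB, the "48 GiB" in the help text
}
```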
@@ -1066,7 +1066,7 @@ large file of a known size to stay below this number of chunks limit.
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.

-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
 Default: fs.SizeSuffix(maxSizeForCopy),
 Advanced: true,
 }, {

@@ -1270,7 +1270,7 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
 const (
 metaMtime = "Mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
 metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
-// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
+// The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility
 // See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
 maxSizeForCopy = 4768 * 1024 * 1024
 maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload
@@ -2991,9 +2991,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 // calculate size of parts
 partSize := int(f.opt.ChunkSize)

 // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
-// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
-// 48GB which seems like a not too unreasonable limit.
+// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
+// 48 GiB which seems like a not too unreasonable limit.
 if size == -1 {
 warnStreamUpload.Do(func() {
 fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",

@@ -3002,7 +3002,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 } else {
 // Adjust partSize until the number of parts is small enough.
 if size/int64(partSize) >= uploadParts {
-// Calculate partition size rounded up to the nearest MB
+// Calculate partition size rounded up to the nearest MiB
 partSize = int((((size / uploadParts) >> 20) + 1) << 20)
 }
 }
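The expression `(((size / uploadParts) >> 20) + 1) << 20` kept by this hunk rounds the per-part size up to the next whole MiB, which is why the comment now says MiB rather than MB. A standalone example with an assumed 100 GiB upload:

```go
package main

import "fmt"

func main() {
	const uploadParts = 10000 // maximum number of parts allowed
	size := int64(100) << 30  // assume a 100 GiB upload for the example
	// Same expression as in the hunk above: divide the file across the
	// maximum number of parts, then round up to the next whole MiB.
	partSize := int((((size / uploadParts) >> 20) + 1) << 20)
	fmt.Println(partSize, "bytes =", partSize>>20, "MiB") // 11534336 bytes = 11 MiB
}
```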
@@ -110,10 +110,10 @@ const (
 decayConstant = 2 // bigger for slower decay, exponential
 apiPath = "/sf/v3" // add to endpoint to get API path
 tokenPath = "/oauth/token" // add to endpoint to get Token path
-minChunkSize = 256 * fs.KibiByte
-maxChunkSize = 2 * fs.GibiByte
-defaultChunkSize = 64 * fs.MebiByte
-defaultUploadCutoff = 128 * fs.MebiByte
+minChunkSize = 256 * fs.Kibi
+maxChunkSize = 2 * fs.Gibi
+defaultChunkSize = 64 * fs.Mebi
+defaultUploadCutoff = 128 * fs.Mebi
 )

 // Generate a new oauth2 config which we will update when we know the TokenURL
@@ -36,7 +36,7 @@ import (
 const (
 directoryMarkerContentType = "application/directory" // content type of directory marker objects
 listChunks = 1000 // chunk size to read directory listings
-defaultChunkSize = 5 * fs.GibiByte
+defaultChunkSize = 5 * fs.Gibi
 minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )

@@ -46,7 +46,7 @@ var SharedOptions = []fs.Option{{
 Help: `Above this size files will be chunked into a _segments container.

 Above this size files will be chunked into a _segments container. The
-default for this is 5GB which is its maximum value.`,
+default for this is 5 GiB which is its maximum value.`,
 Default: defaultChunkSize,
 Advanced: true,
 }, {

@@ -56,7 +56,7 @@ default for this is 5GB which is its maximum value.`,
 When doing streaming uploads (e.g. using rcat or mount) setting this
 flag will cause the swift backend to not upload chunked files.

-This will limit the maximum upload size to 5GB. However non chunked
+This will limit the maximum upload size to 5 GiB. However non chunked
 files are easier to deal with and have an MD5SUM.

 Rclone will still chunk files bigger than chunk_size when doing normal

@@ -419,7 +419,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-const minChunkSize = fs.Byte
+const minChunkSize = fs.SizeSuffixBase
 if cs < minChunkSize {
 return errors.Errorf("%s is less than %s", cs, minChunkSize)
 }
@@ -87,7 +87,7 @@ func (f *Fs) testWithChunk(t *testing.T) {
 preConfChunkSize := f.opt.ChunkSize
 preConfChunk := f.opt.NoChunk
 f.opt.NoChunk = false
-f.opt.ChunkSize = 1024 * fs.Byte
+f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
 defer func() {
 //restore old config after test
 f.opt.ChunkSize = preConfChunkSize

@@ -117,7 +117,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
 preConfChunkSize := f.opt.ChunkSize
 preConfChunk := f.opt.NoChunk
 f.opt.NoChunk = false
-f.opt.ChunkSize = 1024 * fs.Byte
+f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
 segmentContainer := f.root + "_segments"
 defer func() {
 //restore config

@@ -159,7 +159,7 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
 preConfChunkSize := f.opt.ChunkSize
 preConfChunk := f.opt.NoChunk
 f.opt.NoChunk = false
-f.opt.ChunkSize = 1024 * fs.Byte
+f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
 defer func() {
 //restore old config after test
 f.opt.ChunkSize = preConfChunkSize
@@ -36,8 +36,8 @@ If you supply the |--rmdirs| flag, it will remove all empty directories along wi
 You can also use the separate command |rmdir| or |rmdirs| to
 delete empty directories only.

-For example, to delete all files bigger than 100MBytes, you may first want to check what
-would be deleted (use either):
+For example, to delete all files bigger than 100 MiB, you may first want to
+check what would be deleted (use either):

 rclone --min-size 100M lsl remote:path
 rclone --dry-run --min-size 100M delete remote:path

@@ -46,8 +46,8 @@ Then proceed with the actual delete:

 rclone --min-size 100M delete remote:path

-That reads "delete everything with a minimum size of 100 MB", hence
-delete all files bigger than 100MBytes.
+That reads "delete everything with a minimum size of 100 MiB", hence
+delete all files bigger than 100 MiB.

 **Important**: Since this can cause data loss, test first with the
 |--dry-run| or the |--interactive|/|-i| flag.
@@ -206,9 +206,9 @@ When that happens, it is the user's responsibility to stop the mount manually.
 The size of the mounted file system will be set according to information retrieved
 from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
 command. Remotes with unlimited storage may report the used size only,
-then an additional 1PB of free space is assumed. If the remote does not
+then an additional 1 PiB of free space is assumed. If the remote does not
 [support](https://rclone.org/overview/#optional-features) the about feature
-at all, then 1PB is set as both the total and the free size.
+at all, then 1 PiB is set as both the total and the free size.

 **Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
 or newer on some platforms depending on the underlying FUSE library in use.
@@ -385,9 +385,9 @@ func (u *UI) Draw() error {
 }
 if u.showDirAverageSize {
 if averageSize > 0 {
-extras += fmt.Sprintf("%8v ", fs.SizeSuffix(int64(averageSize)))
+extras += fmt.Sprintf("%9v ", fs.SizeSuffix(int64(averageSize)))
 } else {
 extras += " "
 }

 }

@@ -406,7 +406,7 @@ func (u *UI) Draw() error {
 }
 extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
 }
-Linef(0, y, w, fg, bg, ' ', "%c %8v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
+Linef(0, y, w, fg, bg, ' ', "%c %9v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
 y++
 }
 }
@@ -367,7 +367,7 @@ footer {
 }
 };
 function readableFileSize(size) {
-var units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
+var units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];
 var i = 0;
 while(size >= 1024) {
 size /= 1024;
@@ -44,7 +44,7 @@ var commandDefinition = &cobra.Command{
 }

 fmt.Printf("Total objects: %d\n", results.Count)
-fmt.Printf("Total size: %s (%d Bytes)\n", fs.SizeSuffix(results.Bytes).Unit("Bytes"), results.Bytes)
+fmt.Printf("Total size: %s (%d bytes)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)

 return nil
 })
@@ -227,16 +227,16 @@ Checkpoint for internal polling (debug).

 #### --acd-upload-wait-per-gb

-Additional time per GB to wait after a failed complete upload to see if it appears.
+Additional time per GiB to wait after a failed complete upload to see if it appears.

 Sometimes Amazon Drive gives an error when a file has been fully
 uploaded but the file appears anyway after a little while. This
-happens sometimes for files over 1GB in size and nearly every time for
-files bigger than 10GB. This parameter controls the time rclone waits
+happens sometimes for files over 1 GiB in size and nearly every time for
+files bigger than 10 GiB. This parameter controls the time rclone waits
 for the file to appear.

-The default value for this parameter is 3 minutes per GB, so by
-default it will wait 3 minutes for every GB uploaded to see if the
+The default value for this parameter is 3 minutes per GiB, so by
+default it will wait 3 minutes for every GiB uploaded to see if the
 file appears.

 You can disable this feature by setting it to 0. This may cause

@@ -260,7 +260,7 @@ Files >= this size will be downloaded via their tempLink.

 Files this size or more will be downloaded via their "tempLink". This
 is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10GB. The default for this is 9GB which
+of files bigger than about 10 GiB. The default for this is 9 GiB which
 shouldn't need to be changed.

 To download files above this threshold, rclone requests a "tempLink"

@@ -299,7 +299,7 @@ Amazon Drive has an internal limit of file sizes that can be uploaded
 to the service. This limit is not officially published, but all files
 larger than this will fail.

-At the time of writing (Jan 2016) is in the area of 50GB per file.
+At the time of writing (Jan 2016) is in the area of 50 GiB per file.
 This means that larger files are likely to fail.

 Unfortunately there is no way for rclone to see that this failure is
@@ -269,7 +269,7 @@ Leave blank normally.

 #### --azureblob-upload-cutoff

-Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
+Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)

 - Config: upload_cutoff
 - Env Var: RCLONE_AZUREBLOB_UPLOAD_CUTOFF

@@ -278,7 +278,7 @@ Cutoff for switching to chunked upload (<= 256MB). (Deprecated)

 #### --azureblob-chunk-size

-Upload chunk size (<= 100MB).
+Upload chunk size (<= 100 MiB).

 Note that this is stored in memory and there may be up to
 "--transfers" chunks stored at once in memory.
@@ -155,8 +155,8 @@ depending on your hardware, how big the files are, how much you want
 to load your computer, etc. The default of `--transfers 4` is
 definitely too low for Backblaze B2 though.

-Note that uploading big files (bigger than 200 MB by default) will use
-a 96 MB RAM buffer by default. There can be at most `--transfers` of
+Note that uploading big files (bigger than 200 MiB by default) will use
+a 96 MiB RAM buffer by default. There can be at most `--transfers` of
 these in use at any moment, so this sets the upper limit on the memory
 used.

@@ -401,7 +401,7 @@ Cutoff for switching to chunked upload.

 Files above this size will be uploaded in chunks of "--b2-chunk-size".

-This value should be set no larger than 4.657GiB (== 5GB).
+This value should be set no larger than 4.657 GiB (== 5 GB).

 - Config: upload_cutoff
 - Env Var: RCLONE_B2_UPLOAD_CUTOFF

@@ -415,7 +415,7 @@ Cutoff for switching to multipart copy
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.

-The minimum is 0 and the maximum is 4.6GB.
+The minimum is 0 and the maximum is 4.6 GiB.

 - Config: copy_cutoff
 - Env Var: RCLONE_B2_COPY_CUTOFF
|
@ -225,10 +225,10 @@ as they can't be used in JSON strings.
|
||||||
|
|
||||||
### Transfers ###
|
### Transfers ###
|
||||||
|
|
||||||
For files above 50MB rclone will use a chunked transfer. Rclone will
|
For files above 50 MiB rclone will use a chunked transfer. Rclone will
|
||||||
upload up to `--transfers` chunks at the same time (shared among all
|
upload up to `--transfers` chunks at the same time (shared among all
|
||||||
the multipart uploads). Chunks are buffered in memory and are
|
the multipart uploads). Chunks are buffered in memory and are
|
||||||
normally 8MB so increasing `--transfers` will increase memory use.
|
normally 8 MiB so increasing `--transfers` will increase memory use.
|
||||||
|
|
||||||
### Deleting files ###
|
### Deleting files ###
|
||||||
|
|
||||||
|
@ -369,7 +369,7 @@ Fill in for rclone to use a non root folder as its starting point.
|
||||||
|
|
||||||
#### --box-upload-cutoff
|
#### --box-upload-cutoff
|
||||||
|
|
||||||
Cutoff for switching to multipart upload (>= 50MB).
|
Cutoff for switching to multipart upload (>= 50 MiB).
|
||||||
|
|
||||||
- Config: upload_cutoff
|
- Config: upload_cutoff
|
||||||
- Env Var: RCLONE_BOX_UPLOAD_CUTOFF
|
- Env Var: RCLONE_BOX_UPLOAD_CUTOFF
|
||||||
|
|
|
@@ -70,11 +70,11 @@ password:
 The size of a chunk. Lower value good for slow connections but can affect seamless reading.
 Default: 5M
 Choose a number from below, or type in your own value
-1 / 1MB
-\ "1m"
-2 / 5 MB
+1 / 1 MiB
+\ "1M"
+2 / 5 MiB
 \ "5M"
-3 / 10 MB
+3 / 10 MiB
 \ "10M"
 chunk_size> 2
 How much time should object info (file size, file hashes, etc.) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache.

@@ -91,11 +91,11 @@ info_age> 2
 The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.
 Default: 10G
 Choose a number from below, or type in your own value
-1 / 500 MB
+1 / 500 MiB
 \ "500M"
-2 / 1 GB
+2 / 1 GiB
 \ "1G"
-3 / 10 GB
+3 / 10 GiB
 \ "10G"
 chunk_total_size> 3
 Remote config

@@ -364,11 +364,11 @@ will need to be cleared or unexpected EOF errors will occur.
 - Default: 5M
 - Examples:
 - "1m"
-- 1MB
+- 1 MiB
 - "5M"
-- 5 MB
+- 5 MiB
 - "10M"
-- 10 MB
+- 10 MiB

 #### --cache-info-age

@@ -401,11 +401,11 @@ oldest chunks until it goes under this value.
 - Default: 10G
 - Examples:
 - "500M"
-- 500 MB
+- 500 MiB
 - "1G"
-- 1 GB
+- 1 GiB
 - "10G"
-- 10 GB
+- 10 GiB

 ### Advanced Options
@@ -43,7 +43,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 Enter a string value. Press Enter for the default ("").
 remote> remote:path
 Files larger than chunk size will be split in chunks.
-Enter a size with suffix k,M,G,T. Press Enter for the default ("2G").
+Enter a size with suffix K,M,G,T. Press Enter for the default ("2G").
 chunk_size> 100M
 Choose how chunker handles hash sums. All modes but "none" require metadata.
 Enter a string value. Press Enter for the default ("md5").
@@ -23,7 +23,7 @@ If you supply the `--rmdirs` flag, it will remove all empty directories along wi
 You can also use the separate command `rmdir` or `rmdirs` to
 delete empty directories only.

-For example, to delete all files bigger than 100MBytes, you may first want to check what
+For example, to delete all files bigger than 100 MiByte, you may first want to check what
 would be deleted (use either):

 rclone --min-size 100M lsl remote:path

@@ -33,8 +33,8 @@ Then proceed with the actual delete:

 rclone --min-size 100M delete remote:path

-That reads "delete everything with a minimum size of 100 MB", hence
-delete all files bigger than 100MBytes.
+That reads "delete everything with a minimum size of 100 MiB", hence
+delete all files bigger than 100 MiByte.

 **Important**: Since this can cause data loss, test first with the
 `--dry-run` or the `--interactive`/`-i` flag.
@@ -56,9 +56,9 @@ When that happens, it is the user's responsibility to stop the mount manually.
 The size of the mounted file system will be set according to information retrieved
 from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
 command. Remotes with unlimited storage may report the used size only,
-then an additional 1PB of free space is assumed. If the remote does not
+then an additional 1 PiB of free space is assumed. If the remote does not
 [support](https://rclone.org/overview/#optional-features) the about feature
-at all, then 1PB is set as both the total and the free size.
+at all, then 1 PiB is set as both the total and the free size.

 **Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
 or newer on some platforms depending on the underlying FUSE library in use.
@@ -627,7 +627,7 @@ approximately 2×10⁻³² of re-using a nonce.

 #### Chunk

-Each chunk will contain 64kB of data, except for the last one which
+Each chunk will contain 64 KiB of data, except for the last one which
 may have less data. The data chunk is in standard NaCl SecretBox
 format. SecretBox uses XSalsa20 and Poly1305 to encrypt and
 authenticate messages.

@@ -653,12 +653,12 @@ This uses a 32 byte (256 bit key) key derived from the user password.

 49 bytes total

-1MB (1048576 bytes) file will encrypt to
+1 MiB (1048576 bytes) file will encrypt to

 * 32 bytes header
 * 16 chunks of 65568 bytes

 1049120 bytes total (a 0.05% overhead). This is the overhead for big
 files.

 ### Name encryption
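The 1049120-byte figure in the hunk above follows directly from the numbers the document itself gives: a 32-byte file header plus 16 chunks of 65568 bytes each (64 KiB of data plus 32 bytes of per-chunk overhead). A quick standalone check (illustrative, not rclone code):

```go
package main

import "fmt"

func main() {
	const header = 32           // file header
	const chunkPlain = 64 << 10 // 64 KiB of plaintext per chunk
	const chunkCipher = 65568   // 65536 data bytes + 32 bytes per-chunk overhead
	plain := int64(1 << 20)     // the 1 MiB file from the example
	chunks := plain / chunkPlain
	cipher := header + chunks*chunkCipher
	fmt.Println(chunks, cipher) // 16 1049120
	fmt.Printf("%.2f%% overhead\n", 100*float64(cipher-plain)/float64(plain))
}
```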
@@ -421,10 +421,10 @@ possibly signed sequence of decimal numbers, each with optional
 fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
 time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".

-Options which use SIZE use kByte by default. However, a suffix of `b`
-for bytes, `k` for kByte, `M` for MByte, `G` for GByte, `T` for
-TByte and `P` for PByte may be used. These are the binary units, e.g.
-1, 2\*\*10, 2\*\*20, 2\*\*30 respectively.
+Options which use SIZE use KiByte (multiples of 1024 bytes) by default.
+However, a suffix of `B` for Byte, `K` for KiByte, `M` for MiByte,
+`G` for GiByte, `T` for TiByte and `P` for PiByte may be used. These are
+the binary units, e.g. 1, 2\*\*10, 2\*\*20, 2\*\*30 respectively.

 ### --backup-dir=DIR ###
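Since the documented SIZE suffixes are all binary, a flag like `--min-size 100M` means 100 * 2**20 = 104857600 bytes. A one-line check of the multipliers listed above (illustrative only):

```go
package main

import "fmt"

func main() {
	// The binary multipliers the paragraph above lists: each suffix is a
	// power of 1024, so "--min-size 100M" means 100 * 2**20 bytes.
	units := map[string]int64{"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, "P": 1 << 50}
	fmt.Println(100 * units["M"]) // 104857600
}
```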
@@ -467,23 +467,23 @@ This option controls the bandwidth limit. For example

 --bwlimit 10M

-would mean limit the upload and download bandwidth to 10 MByte/s.
+would mean limit the upload and download bandwidth to 10 MiByte/s.
 **NB** this is **bytes** per second not **bits** per second. To use a
-single limit, specify the desired bandwidth in kByte/s, or use a
-suffix b|k|M|G. The default is `0` which means to not limit bandwidth.
+single limit, specify the desired bandwidth in KiByte/s, or use a
+suffix B|K|M|G|T|P. The default is `0` which means to not limit bandwidth.

 The upload and download bandwidth can be specified seperately, as
 `--bwlimit UP:DOWN`, so

 --bwlimit 10M:100k

-would mean limit the upload bandwidth to 10 MByte/s and the download
-bandwidth to 100 kByte/s. Either limit can be "off" meaning no limit, so
+would mean limit the upload bandwidth to 10 MiByte/s and the download
+bandwidth to 100 KiByte/s. Either limit can be "off" meaning no limit, so
 to just limit the upload bandwidth you would use

 --bwlimit 10M:off

-this would limit the upload bandwidth to 10 MByte/s but the download
+this would limit the upload bandwidth to 10 MiByte/s but the download
 bandwidth would be unlimited.

 When specified as above the bandwidth limits last for the duration of

@@ -505,19 +505,19 @@ working hours could be:

 `--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"`

-In this example, the transfer bandwidth will be set to 512 kByte/s
-at 8am every day. At noon, it will rise to 10 MByte/s, and drop back
-to 512 kByte/sec at 1pm. At 6pm, the bandwidth limit will be set to
-30 MByte/s, and at 11pm it will be completely disabled (full speed).
+In this example, the transfer bandwidth will be set to 512 KiByte/s
+at 8am every day. At noon, it will rise to 10 MiByte/s, and drop back
+to 512 KiByte/sec at 1pm. At 6pm, the bandwidth limit will be set to
+30 MiByte/s, and at 11pm it will be completely disabled (full speed).
 Anything between 11pm and 8am will remain unlimited.

 An example of timetable with `WEEKDAY` could be:

 `--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`

-It means that, the transfer bandwidth will be set to 512 kByte/s on
-Monday. It will rise to 10 MByte/s before the end of Friday. At 10:00
-on Saturday it will be set to 1 MByte/s. From 20:00 on Sunday it will
+It means that, the transfer bandwidth will be set to 512 KiByte/s on
+Monday. It will rise to 10 MiByte/s before the end of Friday. At 10:00
+on Saturday it will be set to 1 MiByte/s. From 20:00 on Sunday it will
 be unlimited.

 Timeslots without `WEEKDAY` are extended to the whole week. So this

@@ -536,7 +536,7 @@ being the non HTTP backends, `ftp`, `sftp` and `tardigrade`).
 Note that the units are **Byte/s**, not **bit/s**. Typically
 connections are measured in bit/s - to convert divide by 8. For
 example, let's say you have a 10 Mbit/s connection and you wish rclone
-to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MByte/s so you would
+to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MiByte/s so you would
 use a `--bwlimit 0.625M` parameter for rclone.

 On Unix systems (Linux, macOS, …) the bandwidth limiter can be toggled by
|
||||||
This option controls per file bandwidth limit. For the options see the
|
This option controls per file bandwidth limit. For the options see the
|
||||||
`--bwlimit` flag.
|
`--bwlimit` flag.
|
||||||
|
|
||||||
For example use this to allow no transfers to be faster than 1 MByte/s
|
For example use this to allow no transfers to be faster than 1 MiByte/s
|
||||||
|
|
||||||
--bwlimit-file 1M
|
--bwlimit-file 1M
|
||||||
|
|
||||||
|
@ -770,7 +770,7 @@ which feature does what.
|
||||||
|
|
||||||
This flag can be useful for debugging and in exceptional circumstances
|
This flag can be useful for debugging and in exceptional circumstances
|
||||||
(e.g. Google Drive limiting the total volume of Server Side Copies to
|
(e.g. Google Drive limiting the total volume of Server Side Copies to
|
||||||
100GB/day).
|
100 GiB/day).
|
||||||
|
|
||||||
### --dscp VALUE ###
|
### --dscp VALUE ###
|
||||||
|
|
||||||
|
@ -1047,7 +1047,7 @@ This is the maximum allowable backlog of files in a sync/copy/move
|
||||||
queued for being checked or transferred.
|
queued for being checked or transferred.
|
||||||
|
|
||||||
This can be set arbitrarily large. It will only use memory when the
|
This can be set arbitrarily large. It will only use memory when the
|
||||||
queue is in use. Note that it will use in the order of N kB of memory
|
queue is in use. Note that it will use in the order of N KiB of memory
|
||||||
when the backlog is in use.
|
when the backlog is in use.
|
||||||
|
|
||||||
Setting this large allows rclone to calculate how many files are
|
Setting this large allows rclone to calculate how many files are
|
||||||
|
@ -1176,13 +1176,13 @@ size of the file. To calculate the number of download streams Rclone
|
||||||
divides the size of the file by the `--multi-thread-cutoff` and rounds
|
divides the size of the file by the `--multi-thread-cutoff` and rounds
|
||||||
up, up to the maximum set with `--multi-thread-streams`.
|
up, up to the maximum set with `--multi-thread-streams`.
|
||||||
|
|
||||||
So if `--multi-thread-cutoff 250MB` and `--multi-thread-streams 4` are
|
So if `--multi-thread-cutoff 250M` and `--multi-thread-streams 4` are
|
||||||
in effect (the defaults):
|
in effect (the defaults):
|
||||||
|
|
||||||
- 0MB..250MB files will be downloaded with 1 stream
|
- 0..250 MiB files will be downloaded with 1 stream
|
||||||
- 250MB..500MB files will be downloaded with 2 streams
|
- 250..500 MiB files will be downloaded with 2 streams
|
||||||
- 500MB..750MB files will be downloaded with 3 streams
|
- 500..750 MiB files will be downloaded with 3 streams
|
||||||
- 750MB+ files will be downloaded with 4 streams
|
- 750+ MiB files will be downloaded with 4 streams
|
||||||
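The stream-count rule above can be sketched in a few lines of Go (illustrative only; the function name and the exact behaviour at boundaries such as a file of exactly 250 MiB are assumptions, not rclone's implementation):

package main

import "fmt"

// streams applies the rule above: divide the file size by the cutoff,
// round up, and cap the result at the maximum number of streams.
func streams(size, cutoff, maxStreams int64) int64 {
	n := (size + cutoff - 1) / cutoff // integer round-up
	if n < 1 {
		n = 1
	}
	if n > maxStreams {
		n = maxStreams
	}
	return n
}

func main() {
	const MiB = int64(1) << 20
	cutoff := 250 * MiB // the default --multi-thread-cutoff
	for _, size := range []int64{100 * MiB, 300 * MiB, 600 * MiB, 2000 * MiB} {
		fmt.Printf("%4d MiB -> %d stream(s)\n", size/MiB, streams(size, cutoff, 4))
	}
}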
|
|
||||||
### --no-check-dest ###
|
### --no-check-dest ###
|
||||||
|
|
||||||
|
|
2188
docs/content/docs.md.orig
Normal file
File diff suppressed because it is too large
|
@ -993,7 +993,7 @@ See: https://github.com/rclone/rclone/issues/3631
|
||||||
|
|
||||||
Make upload limit errors be fatal
|
Make upload limit errors be fatal
|
||||||
|
|
||||||
At the time of writing it is only possible to upload 750GB of data to
|
At the time of writing it is only possible to upload 750 GiB of data to
|
||||||
Google Drive a day (this is an undocumented limit). When this limit is
|
Google Drive a day (this is an undocumented limit). When this limit is
|
||||||
reached Google Drive produces a slightly different error message. When
|
reached Google Drive produces a slightly different error message. When
|
||||||
this flag is set it causes these errors to be fatal. These will stop
|
this flag is set it causes these errors to be fatal. These will stop
|
||||||
|
@ -1014,7 +1014,7 @@ See: https://github.com/rclone/rclone/issues/3857
|
||||||
|
|
||||||
Make download limit errors be fatal
|
Make download limit errors be fatal
|
||||||
|
|
||||||
At the time of writing it is only possible to download 10TB of data from
|
At the time of writing it is only possible to download 10 TiB of data from
|
||||||
Google Drive a day (this is an undocumented limit). When this limit is
|
Google Drive a day (this is an undocumented limit). When this limit is
|
||||||
reached Google Drive produces a slightly different error message. When
|
reached Google Drive produces a slightly different error message. When
|
||||||
this flag is set it causes these errors to be fatal. These will stop
|
this flag is set it causes these errors to be fatal. These will stop
|
||||||
|
@ -1226,7 +1226,7 @@ Use the -i flag to see what would be copied before copying.
|
||||||
|
|
||||||
Drive has quite a lot of rate limiting. This causes rclone to be
|
Drive has quite a lot of rate limiting. This causes rclone to be
|
||||||
limited to transferring about 2 files per second only. Individual
|
limited to transferring about 2 files per second only. Individual
|
||||||
files may be transferred much faster at 100s of MByte/s but lots of
|
files may be transferred much faster at 100s of MiByte/s but lots of
|
||||||
small files can take a long time.
|
small files can take a long time.
|
||||||
|
|
||||||
Server side copies are also subject to a separate rate limit. If you
|
Server side copies are also subject to a separate rate limit. If you
|
||||||
|
|
|
@ -185,7 +185,7 @@ Any files larger than this will be uploaded in chunks of this size.
|
||||||
|
|
||||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||||
deal with retries. Setting this larger will increase the speed
|
deal with retries. Setting this larger will increase the speed
|
||||||
slightly (at most 10% for 128MB in tests) at the cost of using more
|
slightly (at most 10% for 128 MiB in tests) at the cost of using more
|
||||||
memory. It can be set smaller if you are tight on memory.
|
memory. It can be set smaller if you are tight on memory.
|
||||||
|
|
||||||
- Config: chunk_size
|
- Config: chunk_size
|
||||||
|
|
|
@ -586,17 +586,17 @@ remote or flag value. The fix then is to quote values containing spaces.
|
||||||
### `--min-size` - Don't transfer any file smaller than this
|
### `--min-size` - Don't transfer any file smaller than this
|
||||||
|
|
||||||
Controls the minimum size file within the scope of an rclone command.
|
Controls the minimum size file within the scope of an rclone command.
|
||||||
Default units are `kBytes` but abbreviations `k`, `M`, or `G` are valid.
|
Default units are `KiByte` but abbreviations `K`, `M`, `G`, `T` or `P` are valid.
|
||||||
|
|
||||||
E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 kByte
|
E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 KiByte
|
||||||
size or larger.
|
size or larger.
|
||||||
|
|
||||||
### `--max-size` - Don't transfer any file larger than this
|
### `--max-size` - Don't transfer any file larger than this
|
||||||
|
|
||||||
Controls the maximum size file within the scope of an rclone command.
|
Controls the maximum size file within the scope of an rclone command.
|
||||||
Default units are `kBytes` but abbreviations `k`, `M`, or `G` are valid.
|
Default units are `KiByte` but abbreviations `K`, `M`, `G`, `T` or `P` are valid.
|
||||||
|
|
||||||
E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GByte
|
E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GiByte
|
||||||
size or smaller.
|
size or smaller.
|
||||||
|
|
||||||
### `--max-age` - Don't transfer any file older than this
|
### `--max-age` - Don't transfer any file older than this
|
||||||
|
@ -650,8 +650,8 @@ E.g. the scope of `rclone sync -i A: B:` can be restricted:
|
||||||
|
|
||||||
rclone --min-size 50k --delete-excluded sync A: B:
|
rclone --min-size 50k --delete-excluded sync A: B:
|
||||||
|
|
||||||
All files on `B:` which are less than 50 kBytes are deleted
|
All files on `B:` which are less than 50 KiByte are deleted
|
||||||
because they are excluded from the rclone sync command.
|
because they are excluded from the rclone sync command.
|
||||||
|
|
||||||
### `--dump filters` - dump the filters to the output
|
### `--dump filters` - dump the filters to the output
|
||||||
|
|
||||||
|
|
|
@ -18,8 +18,8 @@ These flags are available for every command.
|
||||||
--backup-dir string Make backups into hierarchy based in DIR.
|
--backup-dir string Make backups into hierarchy based in DIR.
|
||||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||||
--bwlimit BwTimetable Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable.
|
--bwlimit BwTimetable Bandwidth limit in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.
|
||||||
--bwlimit-file BwTimetable Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable.
|
--bwlimit-file BwTimetable Bandwidth limit per file in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.
|
||||||
--ca-cert string CA certificate used to verify servers
|
--ca-cert string CA certificate used to verify servers
|
||||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||||
--check-first Do all the checks before starting transfers.
|
--check-first Do all the checks before starting transfers.
|
||||||
|
@ -79,12 +79,12 @@ These flags are available for every command.
|
||||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||||
--max-duration duration Maximum duration rclone will transfer data for.
|
--max-duration duration Maximum duration rclone will transfer data for.
|
||||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
|
||||||
--max-stats-groups int Maximum number of stats groups to keep in memory. On max oldest is discarded. (default 1000)
|
--max-stats-groups int Maximum number of stats groups to keep in memory. On max oldest is discarded. (default 1000)
|
||||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||||
--memprofile string Write memory profile to file
|
--memprofile string Write memory profile to file
|
||||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
|
||||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||||
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size. (default 250M)
|
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size. (default 250M)
|
||||||
--multi-thread-streams int Max number of streams to use for multi-thread downloads. (default 4)
|
--multi-thread-streams int Max number of streams to use for multi-thread downloads. (default 4)
|
||||||
|
@ -170,12 +170,12 @@ and may be set in the config file.
|
||||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||||
--acd-token string OAuth Access Token as a JSON blob.
|
--acd-token string OAuth Access Token as a JSON blob.
|
||||||
--acd-token-url string Token server url.
|
--acd-token-url string Token server url.
|
||||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
--acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||||
--alias-remote string Remote or path to alias.
|
--alias-remote string Remote or path to alias.
|
||||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||||
--azureblob-account string Storage Account Name (leave blank to use SAS URL or Emulator)
|
--azureblob-account string Storage Account Name (leave blank to use SAS URL or Emulator)
|
||||||
--azureblob-archive-tier-delete Delete archive tier blobs before overwriting.
|
--azureblob-archive-tier-delete Delete archive tier blobs before overwriting.
|
||||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB). (default 4M)
|
||||||
--azureblob-disable-checksum Don't store MD5 checksum with object metadata.
|
--azureblob-disable-checksum Don't store MD5 checksum with object metadata.
|
||||||
--azureblob-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
|
--azureblob-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
|
||||||
--azureblob-endpoint string Endpoint for the service
|
--azureblob-endpoint string Endpoint for the service
|
||||||
|
@ -189,7 +189,7 @@ and may be set in the config file.
|
||||||
--azureblob-public-access string Public access level of a container: blob, container.
|
--azureblob-public-access string Public access level of a container: blob, container.
|
||||||
--azureblob-sas-url string SAS URL for container level access only
|
--azureblob-sas-url string SAS URL for container level access only
|
||||||
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal.
|
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal.
|
||||||
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
|
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)
|
||||||
--azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
|
--azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
|
||||||
--azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
|
--azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
|
||||||
--b2-account string Account ID or Application Key ID
|
--b2-account string Account ID or Application Key ID
|
||||||
|
@ -218,7 +218,7 @@ and may be set in the config file.
|
||||||
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point.
|
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point.
|
||||||
--box-token string OAuth Access Token as a JSON blob.
|
--box-token string OAuth Access Token as a JSON blob.
|
||||||
--box-token-url string Token server url.
|
--box-token-url string Token server url.
|
||||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB). (default 50M)
|
||||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||||
|
|
|
@ -168,7 +168,7 @@ Leave blank to use the provider defaults.
|
||||||
Above this size files will be chunked into a _segments container.
|
Above this size files will be chunked into a _segments container.
|
||||||
|
|
||||||
Above this size files will be chunked into a _segments container. The
|
Above this size files will be chunked into a _segments container. The
|
||||||
default for this is 5GB which is its maximum value.
|
default for this is 5 GiB which is its maximum value.
|
||||||
|
|
||||||
- Config: chunk_size
|
- Config: chunk_size
|
||||||
- Env Var: RCLONE_HUBIC_CHUNK_SIZE
|
- Env Var: RCLONE_HUBIC_CHUNK_SIZE
|
||||||
|
@ -182,7 +182,7 @@ Don't chunk files during streaming upload.
|
||||||
When doing streaming uploads (e.g. using rcat or mount) setting this
|
When doing streaming uploads (e.g. using rcat or mount) setting this
|
||||||
flag will cause the swift backend to not upload chunked files.
|
flag will cause the swift backend to not upload chunked files.
|
||||||
|
|
||||||
This will limit the maximum upload size to 5GB. However non chunked
|
This will limit the maximum upload size to 5 GiB. However non chunked
|
||||||
files are easier to deal with and have an MD5SUM.
|
files are easier to deal with and have an MD5SUM.
|
||||||
|
|
||||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||||
|
|
|
@ -430,7 +430,7 @@ in it will be mapped to `?` instead.
|
||||||
|
|
||||||
#### File sizes ####
|
#### File sizes ####
|
||||||
|
|
||||||
The largest allowed file size is 250GB for both OneDrive Personal and OneDrive for Business [(Updated 13 Jan 2021)](https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#individualfilesize).
|
The largest allowed file size is 250 GiB for both OneDrive Personal and OneDrive for Business [(Updated 13 Jan 2021)](https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#individualfilesize).
|
||||||
|
|
||||||
#### Path length ####
|
#### Path length ####
|
||||||
|
|
||||||
|
|
|
@ -58,7 +58,7 @@ Here is an overview of the major features of each cloud storage system.
|
||||||
|
|
||||||
¹ Dropbox supports [its own custom
|
¹ Dropbox supports [its own custom
|
||||||
hash](https://www.dropbox.com/developers/reference/content-hash).
|
hash](https://www.dropbox.com/developers/reference/content-hash).
|
||||||
This is an SHA256 sum of all the 4MB block SHA256s.
|
This is an SHA256 sum of all the 4 MiB block SHA256s.
|
||||||
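For illustration, a sketch of that scheme based on the linked Dropbox content-hash reference (not rclone's own hashing code): SHA-256 each 4 MiB block, concatenate the block digests, then SHA-256 the concatenation.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// dropboxContentHash computes the hash described above for the data in r.
func dropboxContentHash(r io.Reader) (string, error) {
	const blockSize = 4 << 20 // 4 MiB blocks
	buf := make([]byte, blockSize)
	var blockHashes []byte
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			sum := sha256.Sum256(buf[:n])
			blockHashes = append(blockHashes, sum[:]...)
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return "", err
		}
	}
	final := sha256.Sum256(blockHashes)
	return fmt.Sprintf("%x", final), nil
}

func main() {
	f, err := os.Open("file.bin") // hypothetical input file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()
	sum, err := dropboxContentHash(f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(sum)
}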
|
|
||||||
² SFTP supports checksums if the same login has shell access and
|
² SFTP supports checksums if the same login has shell access and
|
||||||
`md5sum` or `sha1sum` as well as `echo` are in the remote's PATH.
|
`md5sum` or `sha1sum` as well as `echo` are in the remote's PATH.
|
||||||
|
|
|
@ -101,7 +101,7 @@ docs](/docs/#fast-list) for more details.
|
||||||
### Multipart uploads ###
|
### Multipart uploads ###
|
||||||
|
|
||||||
rclone supports multipart uploads with QingStor which means that it can
|
rclone supports multipart uploads with QingStor which means that it can
|
||||||
upload files bigger than 5GB. Note that files uploaded with multipart
|
upload files bigger than 5 GiB. Note that files uploaded with multipart
|
||||||
upload don't have an MD5SUM.
|
upload don't have an MD5SUM.
|
||||||
|
|
||||||
Note that incomplete multipart uploads older than 24 hours can be
|
Note that incomplete multipart uploads older than 24 hours can be
|
||||||
|
@ -227,7 +227,7 @@ Number of connection retries.
|
||||||
Cutoff for switching to chunked upload
|
Cutoff for switching to chunked upload
|
||||||
|
|
||||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||||
The minimum is 0 and the maximum is 5GB.
|
The minimum is 0 and the maximum is 5 GiB.
|
||||||
|
|
||||||
- Config: upload_cutoff
|
- Config: upload_cutoff
|
||||||
- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
|
- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
|
||||||
|
|
|
@ -327,7 +327,7 @@ objects). See the [rclone docs](/docs/#fast-list) for more details.
|
||||||
|
|
||||||
`--fast-list` trades off API transactions for memory use. As a rough
|
`--fast-list` trades off API transactions for memory use. As a rough
|
||||||
guide rclone uses 1k of memory per object stored, so using
|
guide rclone uses 1k of memory per object stored, so using
|
||||||
`--fast-list` on a sync of a million objects will use roughly 1 GB of
|
`--fast-list` on a sync of a million objects will use roughly 1 GiB of
|
||||||
RAM.
|
RAM.
|
||||||
|
|
||||||
If you are only copying a small number of files into a big repository
|
If you are only copying a small number of files into a big repository
|
||||||
|
@ -407,13 +407,13 @@ work with the SDK properly:
|
||||||
### Multipart uploads ###
|
### Multipart uploads ###
|
||||||
|
|
||||||
rclone supports multipart uploads with S3 which means that it can
|
rclone supports multipart uploads with S3 which means that it can
|
||||||
upload files bigger than 5GB.
|
upload files bigger than 5 GiB.
|
||||||
|
|
||||||
Note that files uploaded *both* with multipart upload *and* through
|
Note that files uploaded *both* with multipart upload *and* through
|
||||||
crypt remotes do not have MD5 sums.
|
crypt remotes do not have MD5 sums.
|
||||||
|
|
||||||
rclone switches from single part uploads to multipart uploads at the
|
rclone switches from single part uploads to multipart uploads at the
|
||||||
point specified by `--s3-upload-cutoff`. This can be a maximum of 5GB
|
point specified by `--s3-upload-cutoff`. This can be a maximum of 5 GiB
|
||||||
and a minimum of 0 (i.e. always upload multipart files).
|
and a minimum of 0 (i.e. always upload multipart files).
|
||||||
|
|
||||||
The chunk sizes used in the multipart upload are specified by
|
The chunk sizes used in the multipart upload are specified by
|
||||||
|
@ -1412,7 +1412,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||||
Cutoff for switching to chunked upload
|
Cutoff for switching to chunked upload
|
||||||
|
|
||||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||||
The minimum is 0 and the maximum is 5GB.
|
The minimum is 0 and the maximum is 5 GiB.
|
||||||
|
|
||||||
- Config: upload_cutoff
|
- Config: upload_cutoff
|
||||||
- Env Var: RCLONE_S3_UPLOAD_CUTOFF
|
- Env Var: RCLONE_S3_UPLOAD_CUTOFF
|
||||||
|
@ -1438,9 +1438,9 @@ Rclone will automatically increase the chunk size when uploading a
|
||||||
large file of known size to stay below the 10,000 chunks limit.
|
large file of known size to stay below the 10,000 chunks limit.
|
||||||
|
|
||||||
Files of unknown size are uploaded with the configured
|
Files of unknown size are uploaded with the configured
|
||||||
chunk_size. Since the default chunk size is 5MB and there can be at
|
chunk_size. Since the default chunk size is 5 MiB and there can be at
|
||||||
most 10,000 chunks, this means that by default the maximum size of
|
most 10,000 chunks, this means that by default the maximum size of
|
||||||
a file you can stream upload is 48GB. If you wish to stream upload
|
a file you can stream upload is 48 GiB. If you wish to stream upload
|
||||||
larger files then you will need to increase chunk_size.
|
larger files then you will need to increase chunk_size.
|
||||||
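The 48 GiB figure follows directly from the two numbers above; a quick check in Go (illustrative arithmetic only):

package main

import "fmt"

func main() {
	const chunkSize = 5 << 20 // default chunk_size: 5 MiB
	const maxChunks = 10000   // hard limit on chunks per upload
	maxStream := int64(chunkSize) * maxChunks
	fmt.Printf("%d bytes = %.1f GiB\n", maxStream, float64(maxStream)/(1<<30))
	// Prints about 48.8 GiB, which the docs round down to 48 GiB.
}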
|
|
||||||
- Config: chunk_size
|
- Config: chunk_size
|
||||||
|
@ -1474,7 +1474,7 @@ Cutoff for switching to multipart copy
|
||||||
Any files larger than this that need to be server-side copied will be
|
Any files larger than this that need to be server-side copied will be
|
||||||
copied in chunks of this size.
|
copied in chunks of this size.
|
||||||
|
|
||||||
The minimum is 0 and the maximum is 5GB.
|
The minimum is 0 and the maximum is 5 GiB.
|
||||||
|
|
||||||
- Config: copy_cutoff
|
- Config: copy_cutoff
|
||||||
- Env Var: RCLONE_S3_COPY_CUTOFF
|
- Env Var: RCLONE_S3_COPY_CUTOFF
|
||||||
|
|
|
@ -112,10 +112,10 @@ flag.
|
||||||
|
|
||||||
### Transfers ###
|
### Transfers ###
|
||||||
|
|
||||||
For files above 128MB rclone will use a chunked transfer. Rclone will
|
For files above 128 MiB rclone will use a chunked transfer. Rclone will
|
||||||
upload up to `--transfers` chunks at the same time (shared among all
|
upload up to `--transfers` chunks at the same time (shared among all
|
||||||
the multipart uploads). Chunks are buffered in memory and are
|
the multipart uploads). Chunks are buffered in memory and are
|
||||||
normally 64MB so increasing `--transfers` will increase memory use.
|
normally 64 MiB so increasing `--transfers` will increase memory use.
|
||||||
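A rough way to see the memory impact described above (a sketch with assumed numbers, not rclone internals): the in-flight chunk buffers scale with `--transfers` times the chunk size.

package main

import "fmt"

func main() {
	const chunkMiB = 64 // typical chunk size mentioned above
	for _, transfers := range []int{4, 8, 16} {
		fmt.Printf("--transfers %2d -> about %d MiB of chunk buffers\n",
			transfers, transfers*chunkMiB)
	}
}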
|
|
||||||
### Limitations ###
|
### Limitations ###
|
||||||
|
|
||||||
|
|
|
@ -444,7 +444,7 @@ If true avoid calling abort upload on a failure. It should be set to true for re
|
||||||
Above this size files will be chunked into a _segments container.
|
Above this size files will be chunked into a _segments container.
|
||||||
|
|
||||||
Above this size files will be chunked into a _segments container. The
|
Above this size files will be chunked into a _segments container. The
|
||||||
default for this is 5GB which is its maximum value.
|
default for this is 5 GiB which is its maximum value.
|
||||||
|
|
||||||
- Config: chunk_size
|
- Config: chunk_size
|
||||||
- Env Var: RCLONE_SWIFT_CHUNK_SIZE
|
- Env Var: RCLONE_SWIFT_CHUNK_SIZE
|
||||||
|
@ -458,7 +458,7 @@ Don't chunk files during streaming upload.
|
||||||
When doing streaming uploads (e.g. using rcat or mount) setting this
|
When doing streaming uploads (e.g. using rcat or mount) setting this
|
||||||
flag will cause the swift backend to not upload chunked files.
|
flag will cause the swift backend to not upload chunked files.
|
||||||
|
|
||||||
This will limit the maximum upload size to 5GB. However non chunked
|
This will limit the maximum upload size to 5 GiB. However non chunked
|
||||||
files are easier to deal with and have an MD5SUM.
|
files are easier to deal with and have an MD5SUM.
|
||||||
|
|
||||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||||
|
|
|
@ -114,15 +114,15 @@ as they can't be used in JSON strings.
|
||||||
|
|
||||||
### Limitations ###
|
### Limitations ###
|
||||||
|
|
||||||
When uploading very large files (bigger than about 5GB) you will need
|
When uploading very large files (bigger than about 5 GiB) you will need
|
||||||
to increase the `--timeout` parameter. This is because Yandex pauses
|
to increase the `--timeout` parameter. This is because Yandex pauses
|
||||||
(perhaps to calculate the MD5SUM for the entire file) before returning
|
(perhaps to calculate the MD5SUM for the entire file) before returning
|
||||||
confirmation that the file has been uploaded. The default handling of
|
confirmation that the file has been uploaded. The default handling of
|
||||||
timeouts in rclone is to assume a 5 minute pause is an error and close
|
timeouts in rclone is to assume a 5 minute pause is an error and close
|
||||||
the connection - you'll see `net/http: timeout awaiting response
|
the connection - you'll see `net/http: timeout awaiting response
|
||||||
headers` errors in the logs if this is happening. Setting the timeout
|
headers` errors in the logs if this is happening. Setting the timeout
|
||||||
to twice the max size of file in GB should be enough, so if you want
|
to twice the maximum file size in GiB should be enough, so if you want
|
||||||
to upload a 30GB file set a timeout of `2 * 30 = 60m`, that is
|
to upload a 30 GiB file set a timeout of `2 * 30 = 60m`, that is
|
||||||
`--timeout 60m`.
|
`--timeout 60m`.
|
||||||
|
|
||||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs" >}}
|
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs" >}}
|
||||||
|
|
|
@ -300,15 +300,13 @@ func (s *StatsInfo) String() string {
|
||||||
dateString = ""
|
dateString = ""
|
||||||
elapsedTime = time.Since(startTime)
|
elapsedTime = time.Since(startTime)
|
||||||
elapsedTimeSecondsOnly = elapsedTime.Truncate(time.Second/10) % time.Minute
|
elapsedTimeSecondsOnly = elapsedTime.Truncate(time.Second/10) % time.Minute
|
||||||
displaySpeed = ts.speed
|
displaySpeedString string
|
||||||
displaySpeedUnit string
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if s.ci.DataRateUnit == "bits" {
|
if s.ci.DataRateUnit == "bits" {
|
||||||
displaySpeed *= 8
|
displaySpeedString = fs.SizeSuffix(ts.speed * 8).BitRateUnit()
|
||||||
displaySpeedUnit = "bit/s"
|
|
||||||
} else {
|
} else {
|
||||||
displaySpeedUnit = "Byte/s"
|
displaySpeedString = fs.SizeSuffix(ts.speed).ByteRateUnit()
|
||||||
}
|
}
|
||||||
|
|
||||||
if !s.ci.StatsOneLine {
|
if !s.ci.StatsOneLine {
|
||||||
|
@ -330,12 +328,12 @@ func (s *StatsInfo) String() string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
|
_, _ = fmt.Fprintf(buf, "%s%11s / %s, %s, %s, ETA %s%s",
|
||||||
dateString,
|
dateString,
|
||||||
fs.SizeSuffix(s.bytes),
|
fs.SizeSuffix(s.bytes),
|
||||||
fs.SizeSuffix(ts.totalBytes).Unit("Byte"),
|
fs.SizeSuffix(ts.totalBytes).ByteUnit(),
|
||||||
percent(s.bytes, ts.totalBytes),
|
percent(s.bytes, ts.totalBytes),
|
||||||
fs.SizeSuffix(displaySpeed).Unit(displaySpeedUnit),
|
displaySpeedString,
|
||||||
etaString(s.bytes, ts.totalBytes, ts.speed),
|
etaString(s.bytes, ts.totalBytes, ts.speed),
|
||||||
xfrchkString,
|
xfrchkString,
|
||||||
)
|
)
|
||||||
|
|
|
@ -24,7 +24,7 @@ func TestRcBwLimit(t *testing.T) {
|
||||||
"bytesPerSecond": int64(1048576),
|
"bytesPerSecond": int64(1048576),
|
||||||
"bytesPerSecondTx": int64(1048576),
|
"bytesPerSecondTx": int64(1048576),
|
||||||
"bytesPerSecondRx": int64(1048576),
|
"bytesPerSecondRx": int64(1048576),
|
||||||
"rate": "1M",
|
"rate": "1Mi",
|
||||||
}, out)
|
}, out)
|
||||||
assert.Equal(t, rate.Limit(1048576), TokenBucket.curr[0].Limit())
|
assert.Equal(t, rate.Limit(1048576), TokenBucket.curr[0].Limit())
|
||||||
|
|
||||||
|
@ -36,7 +36,7 @@ func TestRcBwLimit(t *testing.T) {
|
||||||
"bytesPerSecond": int64(1048576),
|
"bytesPerSecond": int64(1048576),
|
||||||
"bytesPerSecondTx": int64(1048576),
|
"bytesPerSecondTx": int64(1048576),
|
||||||
"bytesPerSecondRx": int64(1048576),
|
"bytesPerSecondRx": int64(1048576),
|
||||||
"rate": "1M",
|
"rate": "1Mi",
|
||||||
}, out)
|
}, out)
|
||||||
|
|
||||||
// Set
|
// Set
|
||||||
|
@ -49,7 +49,7 @@ func TestRcBwLimit(t *testing.T) {
|
||||||
"bytesPerSecond": int64(10485760),
|
"bytesPerSecond": int64(10485760),
|
||||||
"bytesPerSecondTx": int64(10485760),
|
"bytesPerSecondTx": int64(10485760),
|
||||||
"bytesPerSecondRx": int64(1048576),
|
"bytesPerSecondRx": int64(1048576),
|
||||||
"rate": "10M:1M",
|
"rate": "10Mi:1Mi",
|
||||||
}, out)
|
}, out)
|
||||||
assert.Equal(t, rate.Limit(10485760), TokenBucket.curr[0].Limit())
|
assert.Equal(t, rate.Limit(10485760), TokenBucket.curr[0].Limit())
|
||||||
|
|
||||||
|
@ -61,7 +61,7 @@ func TestRcBwLimit(t *testing.T) {
|
||||||
"bytesPerSecond": int64(10485760),
|
"bytesPerSecond": int64(10485760),
|
||||||
"bytesPerSecondTx": int64(10485760),
|
"bytesPerSecondTx": int64(10485760),
|
||||||
"bytesPerSecondRx": int64(1048576),
|
"bytesPerSecondRx": int64(1048576),
|
||||||
"rate": "10M:1M",
|
"rate": "10Mi:1Mi",
|
||||||
}, out)
|
}, out)
|
||||||
|
|
||||||
// Reset
|
// Reset
|
||||||
|
|
|
@ -43,7 +43,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"666k",
|
"666Ki",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"666:333",
|
"666:333",
|
||||||
|
@ -51,7 +51,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"666k:333k",
|
"666Ki:333Ki",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"10:20,666",
|
"10:20,666",
|
||||||
|
@ -65,7 +65,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Sun-10:20,666k Mon-10:20,666k Tue-10:20,666k Wed-10:20,666k Thu-10:20,666k Fri-10:20,666k Sat-10:20,666k",
|
"Sun-10:20,666Ki Mon-10:20,666Ki Tue-10:20,666Ki Wed-10:20,666Ki Thu-10:20,666Ki Fri-10:20,666Ki Sat-10:20,666Ki",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"10:20,666:333",
|
"10:20,666:333",
|
||||||
|
@ -79,7 +79,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Sun-10:20,666k:333k Mon-10:20,666k:333k Tue-10:20,666k:333k Wed-10:20,666k:333k Thu-10:20,666k:333k Fri-10:20,666k:333k Sat-10:20,666k:333k",
|
"Sun-10:20,666Ki:333Ki Mon-10:20,666Ki:333Ki Tue-10:20,666Ki:333Ki Wed-10:20,666Ki:333Ki Thu-10:20,666Ki:333Ki Fri-10:20,666Ki:333Ki Sat-10:20,666Ki:333Ki",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"11:00,333 13:40,666 23:50,10M 23:59,off",
|
"11:00,333 13:40,666 23:50,10M 23:59,off",
|
||||||
|
@ -114,7 +114,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Sun-11:00,333k Mon-11:00,333k Tue-11:00,333k Wed-11:00,333k Thu-11:00,333k Fri-11:00,333k Sat-11:00,333k Sun-13:40,666k Mon-13:40,666k Tue-13:40,666k Wed-13:40,666k Thu-13:40,666k Fri-13:40,666k Sat-13:40,666k Sun-23:50,10M Mon-23:50,10M Tue-23:50,10M Wed-23:50,10M Thu-23:50,10M Fri-23:50,10M Sat-23:50,10M Sun-23:59,off Mon-23:59,off Tue-23:59,off Wed-23:59,off Thu-23:59,off Fri-23:59,off Sat-23:59,off",
|
"Sun-11:00,333Ki Mon-11:00,333Ki Tue-11:00,333Ki Wed-11:00,333Ki Thu-11:00,333Ki Fri-11:00,333Ki Sat-11:00,333Ki Sun-13:40,666Ki Mon-13:40,666Ki Tue-13:40,666Ki Wed-13:40,666Ki Thu-13:40,666Ki Fri-13:40,666Ki Sat-13:40,666Ki Sun-23:50,10Mi Mon-23:50,10Mi Tue-23:50,10Mi Wed-23:50,10Mi Thu-23:50,10Mi Fri-23:50,10Mi Sat-23:50,10Mi Sun-23:59,off Mon-23:59,off Tue-23:59,off Wed-23:59,off Thu-23:59,off Fri-23:59,off Sat-23:59,off",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"11:00,333:666 13:40,666:off 23:50,10M:1M 23:59,off:10M",
|
"11:00,333:666 13:40,666:off 23:50,10M:1M 23:59,off:10M",
|
||||||
|
@ -149,7 +149,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: BwPair{Tx: -1, Rx: 10 * 1024 * 1024}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Sun-11:00,333k:666k Mon-11:00,333k:666k Tue-11:00,333k:666k Wed-11:00,333k:666k Thu-11:00,333k:666k Fri-11:00,333k:666k Sat-11:00,333k:666k Sun-13:40,666k:off Mon-13:40,666k:off Tue-13:40,666k:off Wed-13:40,666k:off Thu-13:40,666k:off Fri-13:40,666k:off Sat-13:40,666k:off Sun-23:50,10M:1M Mon-23:50,10M:1M Tue-23:50,10M:1M Wed-23:50,10M:1M Thu-23:50,10M:1M Fri-23:50,10M:1M Sat-23:50,10M:1M Sun-23:59,off:10M Mon-23:59,off:10M Tue-23:59,off:10M Wed-23:59,off:10M Thu-23:59,off:10M Fri-23:59,off:10M Sat-23:59,off:10M",
|
"Sun-11:00,333Ki:666Ki Mon-11:00,333Ki:666Ki Tue-11:00,333Ki:666Ki Wed-11:00,333Ki:666Ki Thu-11:00,333Ki:666Ki Fri-11:00,333Ki:666Ki Sat-11:00,333Ki:666Ki Sun-13:40,666Ki:off Mon-13:40,666Ki:off Tue-13:40,666Ki:off Wed-13:40,666Ki:off Thu-13:40,666Ki:off Fri-13:40,666Ki:off Sat-13:40,666Ki:off Sun-23:50,10Mi:1Mi Mon-23:50,10Mi:1Mi Tue-23:50,10Mi:1Mi Wed-23:50,10Mi:1Mi Thu-23:50,10Mi:1Mi Fri-23:50,10Mi:1Mi Sat-23:50,10Mi:1Mi Sun-23:59,off:10Mi Mon-23:59,off:10Mi Tue-23:59,off:10Mi Wed-23:59,off:10Mi Thu-23:59,off:10Mi Fri-23:59,off:10Mi Sat-23:59,off:10Mi",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"Mon-11:00,333 Tue-13:40,666:333 Fri-00:00,10M Sat-10:00,off Sun-23:00,666",
|
"Mon-11:00,333 Tue-13:40,666:333 Fri-00:00,10M Sat-10:00,off Sun-23:00,666",
|
||||||
|
@ -161,7 +161,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Mon-11:00,333k Tue-13:40,666k:333k Fri-00:00,10M Sat-10:00,off Sun-23:00,666k",
|
"Mon-11:00,333Ki Tue-13:40,666Ki:333Ki Fri-00:00,10Mi Sat-10:00,off Sun-23:00,666Ki",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M 00:01,off Sun-23:00,666:off",
|
"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M 00:01,off Sun-23:00,666:off",
|
||||||
|
@ -179,7 +179,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: BwPair{Tx: 666 * 1024, Rx: -1}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Mon-11:00,333k Tue-13:40,666k Fri-00:00,10M Sun-00:01,off Mon-00:01,off Tue-00:01,off Wed-00:01,off Thu-00:01,off Fri-00:01,off Sat-00:01,off Sun-23:00,666k:off",
|
"Mon-11:00,333Ki Tue-13:40,666Ki Fri-00:00,10Mi Sun-00:01,off Mon-00:01,off Tue-00:01,off Wed-00:01,off Thu-00:01,off Fri-00:01,off Sat-00:01,off Sun-23:00,666Ki:off",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// from the docs
|
// from the docs
|
||||||
|
@ -222,7 +222,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2300, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Sun-08:00,512k Mon-08:00,512k Tue-08:00,512k Wed-08:00,512k Thu-08:00,512k Fri-08:00,512k Sat-08:00,512k Sun-12:00,10M Mon-12:00,10M Tue-12:00,10M Wed-12:00,10M Thu-12:00,10M Fri-12:00,10M Sat-12:00,10M Sun-13:00,512k Mon-13:00,512k Tue-13:00,512k Wed-13:00,512k Thu-13:00,512k Fri-13:00,512k Sat-13:00,512k Sun-18:00,30M Mon-18:00,30M Tue-18:00,30M Wed-18:00,30M Thu-18:00,30M Fri-18:00,30M Sat-18:00,30M Sun-23:00,off Mon-23:00,off Tue-23:00,off Wed-23:00,off Thu-23:00,off Fri-23:00,off Sat-23:00,off",
|
"Sun-08:00,512Ki Mon-08:00,512Ki Tue-08:00,512Ki Wed-08:00,512Ki Thu-08:00,512Ki Fri-08:00,512Ki Sat-08:00,512Ki Sun-12:00,10Mi Mon-12:00,10Mi Tue-12:00,10Mi Wed-12:00,10Mi Thu-12:00,10Mi Fri-12:00,10Mi Sat-12:00,10Mi Sun-13:00,512Ki Mon-13:00,512Ki Tue-13:00,512Ki Wed-13:00,512Ki Thu-13:00,512Ki Fri-13:00,512Ki Sat-13:00,512Ki Sun-18:00,30Mi Mon-18:00,30Mi Tue-18:00,30Mi Wed-18:00,30Mi Thu-18:00,30Mi Fri-18:00,30Mi Sat-18:00,30Mi Sun-23:00,off Mon-23:00,off Tue-23:00,off Wed-23:00,off Thu-23:00,off Fri-23:00,off Sat-23:00,off",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// from the docs
|
// from the docs
|
||||||
|
@ -234,7 +234,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Mon-00:00,512k Fri-23:59,10M Sat-10:00,1M Sun-20:00,off",
|
"Mon-00:00,512Ki Fri-23:59,10Mi Sat-10:00,1Mi Sun-20:00,off",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// from the docs
|
// from the docs
|
||||||
|
@ -251,7 +251,7 @@ func TestBwTimetableSet(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2000, Bandwidth: BwPair{Tx: -1, Rx: -1}},
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
"Mon-00:00,512k Sun-12:00,1M Mon-12:00,1M Tue-12:00,1M Wed-12:00,1M Thu-12:00,1M Fri-12:00,1M Sat-12:00,1M Sun-20:00,off",
|
"Mon-00:00,512Ki Sun-12:00,1Mi Mon-12:00,1Mi Tue-12:00,1Mi Wed-12:00,1Mi Thu-12:00,1Mi Fri-12:00,1Mi Sat-12:00,1Mi Sun-20:00,off",
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
tt := BwTimetable{}
|
tt := BwTimetable{}
|
||||||
|
@ -537,13 +537,13 @@ func TestBwTimetableMarshalJSON(t *testing.T) {
|
||||||
BwTimetable{
|
BwTimetable{
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||||
},
|
},
|
||||||
`"666k"`,
|
`"666Ki"`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
BwTimetable{
|
BwTimetable{
|
||||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 333 * 1024}},
|
||||||
},
|
},
|
||||||
`"666k:333k"`,
|
`"666Ki:333Ki"`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
BwTimetable{
|
BwTimetable{
|
||||||
|
@ -555,7 +555,7 @@ func TestBwTimetableMarshalJSON(t *testing.T) {
|
||||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: BwPair{Tx: 666 * 1024, Rx: 666 * 1024}},
|
||||||
},
|
},
|
||||||
`"Sun-10:20,666k Mon-10:20,666k Tue-10:20,666k Wed-10:20,666k Thu-10:20,666k Fri-10:20,666k Sat-10:20,666k"`,
|
`"Sun-10:20,666Ki Mon-10:20,666Ki Tue-10:20,666Ki Wed-10:20,666Ki Thu-10:20,666Ki Fri-10:20,666Ki Sat-10:20,666Ki"`,
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
got, err := json.Marshal(test.in)
|
got, err := json.Marshal(test.in)
|
||||||
|
|
|
@ -97,8 +97,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
|
||||||
flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
|
flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
|
||||||
flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
|
flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
|
||||||
flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
|
flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
|
||||||
flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable.")
|
flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.")
|
||||||
flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable.")
|
flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.")
|
||||||
flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
|
flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
|
||||||
flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
|
flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
|
||||||
flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
|
flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
|
||||||
|
|
|
@ -353,7 +353,7 @@ func ChooseOption(o *fs.Option, name string) string {
|
||||||
case bool:
|
case bool:
|
||||||
what = "boolean value (true or false)"
|
what = "boolean value (true or false)"
|
||||||
case fs.SizeSuffix:
|
case fs.SizeSuffix:
|
||||||
what = "size with suffix k,M,G,T"
|
what = "size with suffix K,M,G,T"
|
||||||
case fs.Duration:
|
case fs.Duration:
|
||||||
what = "duration s,m,h,d,w,M,y"
|
what = "duration s,m,h,d,w,M,y"
|
||||||
case int, int8, int16, int32, int64:
|
case int, int8, int16, int32, int64:
|
||||||
|
|
|
@ -41,8 +41,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||||
flags.StringArrayVarP(flagSet, &Opt.FilesFromRaw, "files-from-raw", "", nil, "Read list of source-file names from file without any processing of lines (use - to read from stdin)")
|
flags.StringArrayVarP(flagSet, &Opt.FilesFromRaw, "files-from-raw", "", nil, "Read list of source-file names from file without any processing of lines (use - to read from stdin)")
|
||||||
flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y")
|
flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y")
|
||||||
flags.FVarP(flagSet, &Opt.MaxAge, "max-age", "", "Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y")
|
flags.FVarP(flagSet, &Opt.MaxAge, "max-age", "", "Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y")
|
||||||
flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Only transfer files bigger than this in k or suffix b|k|M|G")
|
flags.FVarP(flagSet, &Opt.MinSize, "min-size", "", "Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P")
|
||||||
flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Only transfer files smaller than this in k or suffix b|k|M|G")
|
flags.FVarP(flagSet, &Opt.MaxSize, "max-size", "", "Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P")
|
||||||
flags.BoolVarP(flagSet, &Opt.IgnoreCase, "ignore-case", "", false, "Ignore case in filters (case insensitive)")
|
flags.BoolVarP(flagSet, &Opt.IgnoreCase, "ignore-case", "", false, "Ignore case in filters (case insensitive)")
|
||||||
//cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
|
//cvsExclude = BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
|
||||||
}
|
}
|
||||||
|
|
|
@ -99,7 +99,7 @@ func TestOption(t *testing.T) {
|
||||||
Name: "potato",
|
Name: "potato",
|
||||||
Value: SizeSuffix(17 << 20),
|
Value: SizeSuffix(17 << 20),
|
||||||
}
|
}
|
||||||
assert.Equal(t, "17M", d.String())
|
assert.Equal(t, "17Mi", d.String())
|
||||||
assert.Equal(t, "SizeSuffix", d.Type())
|
assert.Equal(t, "SizeSuffix", d.Type())
|
||||||
err := d.Set("18M")
|
err := d.Set("18M")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
|
@ -257,7 +257,7 @@ func TestExecuteJobWithConfig(t *testing.T) {
|
||||||
called := false
|
called := false
|
||||||
jobFn := func(ctx context.Context, in rc.Params) (rc.Params, error) {
|
jobFn := func(ctx context.Context, in rc.Params) (rc.Params, error) {
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
assert.Equal(t, 42*fs.MebiByte, ci.BufferSize)
|
assert.Equal(t, 42*fs.Mebi, ci.BufferSize)
|
||||||
called = true
|
called = true
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
@ -278,7 +278,7 @@ func TestExecuteJobWithConfig(t *testing.T) {
|
||||||
assert.Equal(t, true, called)
|
assert.Equal(t, true, called)
|
||||||
// Check that wasn't the default
|
// Check that wasn't the default
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
assert.NotEqual(t, 42*fs.MebiByte, ci.BufferSize)
|
assert.NotEqual(t, 42*fs.Mebi, ci.BufferSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExecuteJobWithFilter(t *testing.T) {
|
func TestExecuteJobWithFilter(t *testing.T) {
|
||||||
|
|
159
fs/sizesuffix.go
|
@ -1,6 +1,6 @@
|
||||||
package fs
|
package fs
|
||||||
|
|
||||||
// SizeSuffix is parsed by flag with k/M/G suffixes
|
// SizeSuffix is parsed by flag with K/M/G binary suffixes
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
@ -17,13 +17,21 @@ type SizeSuffix int64
|
||||||
|
|
||||||
// Common multipliers for SizeSuffix
|
// Common multipliers for SizeSuffix
|
||||||
const (
|
const (
|
||||||
Byte SizeSuffix = 1 << (iota * 10)
|
SizeSuffixBase SizeSuffix = 1 << (iota * 10)
|
||||||
KibiByte
|
Kibi
|
||||||
MebiByte
|
Mebi
|
||||||
GibiByte
|
Gibi
|
||||||
TebiByte
|
Tebi
|
||||||
PebiByte
|
Pebi
|
||||||
ExbiByte
|
Exbi
|
||||||
|
)
|
||||||
|
const (
|
||||||
|
// SizeSuffixMax is the largest SizeSuffix multiplier
|
||||||
|
SizeSuffixMax = Exbi
|
||||||
|
// SizeSuffixMaxValue is the largest value that can be used to create SizeSuffix
|
||||||
|
SizeSuffixMaxValue = math.MaxInt64
|
||||||
|
// SizeSuffixMinValue is the smallest value that can be used to create SizeSuffix
|
||||||
|
SizeSuffixMinValue = math.MinInt64
|
||||||
)
|
)
|
||||||
|
|
||||||
// Turn SizeSuffix into a string and a suffix
|
// Turn SizeSuffix into a string and a suffix
|
||||||
|
@ -35,24 +43,27 @@ func (x SizeSuffix) string() (string, string) {
|
||||||
return "off", ""
|
return "off", ""
|
||||||
case x == 0:
|
case x == 0:
|
||||||
return "0", ""
|
return "0", ""
|
||||||
case x < 1<<10:
|
case x < Kibi:
|
||||||
scaled = float64(x)
|
scaled = float64(x)
|
||||||
suffix = ""
|
suffix = ""
|
||||||
case x < 1<<20:
|
case x < Mebi:
|
||||||
scaled = float64(x) / (1 << 10)
|
scaled = float64(x) / float64(Kibi)
|
||||||
suffix = "k"
|
suffix = "Ki"
|
||||||
case x < 1<<30:
|
case x < Gibi:
|
||||||
scaled = float64(x) / (1 << 20)
|
scaled = float64(x) / float64(Mebi)
|
||||||
suffix = "M"
|
suffix = "Mi"
|
||||||
case x < 1<<40:
|
case x < Tebi:
|
||||||
scaled = float64(x) / (1 << 30)
|
scaled = float64(x) / float64(Gibi)
|
||||||
suffix = "G"
|
suffix = "Gi"
|
||||||
case x < 1<<50:
|
case x < Pebi:
|
||||||
scaled = float64(x) / (1 << 40)
|
scaled = float64(x) / float64(Tebi)
|
||||||
suffix = "T"
|
suffix = "Ti"
|
||||||
|
case x < Exbi:
|
||||||
|
scaled = float64(x) / float64(Pebi)
|
||||||
|
suffix = "Pi"
|
||||||
default:
|
default:
|
||||||
scaled = float64(x) / (1 << 50)
|
scaled = float64(x) / float64(Exbi)
|
||||||
suffix = "P"
|
suffix = "Ei"
|
||||||
}
|
}
|
||||||
if math.Floor(scaled) == scaled {
|
if math.Floor(scaled) == scaled {
|
||||||
return fmt.Sprintf("%.0f", scaled), suffix
|
return fmt.Sprintf("%.0f", scaled), suffix
|
||||||
|
@ -67,12 +78,67 @@ func (x SizeSuffix) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unit turns SizeSuffix into a string with a unit
|
// Unit turns SizeSuffix into a string with a unit
|
||||||
func (x SizeSuffix) Unit(unit string) string {
|
func (x SizeSuffix) unit(unit string) string {
|
||||||
val, suffix := x.string()
|
val, suffix := x.string()
|
||||||
if val == "off" {
|
if val == "off" {
|
||||||
return val
|
return val
|
||||||
}
|
}
|
||||||
return val + " " + suffix + unit
|
var suffixUnit string
|
||||||
|
if suffix != "" && unit != "" {
|
||||||
|
suffixUnit = suffix + unit
|
||||||
|
} else {
|
||||||
|
suffixUnit = suffix + unit
|
||||||
|
}
|
||||||
|
return val + " " + suffixUnit
|
||||||
|
}
|
||||||
|
|
||||||
|
// BitUnit turns SizeSuffix into a string with bit unit
|
||||||
|
func (x SizeSuffix) BitUnit() string {
|
||||||
|
return x.unit("bit")
|
||||||
|
}
|
||||||
|
|
||||||
|
// BitRateUnit turns SizeSuffix into a string with bit rate unit
|
||||||
|
func (x SizeSuffix) BitRateUnit() string {
|
||||||
|
return x.unit("bit/s")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByteUnit turns SizeSuffix into a string with byte unit
|
||||||
|
func (x SizeSuffix) ByteUnit() string {
|
||||||
|
return x.unit("Byte")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByteRateUnit turns SizeSuffix into a string with byte rate unit
|
||||||
|
func (x SizeSuffix) ByteRateUnit() string {
|
||||||
|
return x.unit("Byte/s")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByteShortUnit turns SizeSuffix into a string with byte unit short form
|
||||||
|
func (x SizeSuffix) ByteShortUnit() string {
|
||||||
|
return x.unit("B")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByteRateShortUnit turns SizeSuffix into a string with byte rate unit short form
|
||||||
|
func (x SizeSuffix) ByteRateShortUnit() string {
|
||||||
|
return x.unit("B/s")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *SizeSuffix) multiplierFromSymbol(s byte) (found bool, multiplier float64) {
|
||||||
|
switch s {
|
||||||
|
case 'k', 'K':
|
||||||
|
return true, float64(Kibi)
|
||||||
|
case 'm', 'M':
|
||||||
|
return true, float64(Mebi)
|
||||||
|
case 'g', 'G':
|
||||||
|
return true, float64(Gibi)
|
||||||
|
case 't', 'T':
|
||||||
|
return true, float64(Tebi)
|
||||||
|
case 'p', 'P':
|
||||||
|
return true, float64(Pebi)
|
||||||
|
case 'e', 'E':
|
||||||
|
return true, float64(Exbi)
|
||||||
|
default:
|
||||||
|
return false, float64(SizeSuffixBase)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set a SizeSuffix
|
// Set a SizeSuffix
|
||||||
|
@ -86,25 +152,42 @@ func (x *SizeSuffix) Set(s string) error {
|
||||||
}
|
}
|
||||||
suffix := s[len(s)-1]
|
suffix := s[len(s)-1]
|
||||||
suffixLen := 1
|
suffixLen := 1
|
||||||
|
multiplierFound := false
|
||||||
var multiplier float64
|
var multiplier float64
|
||||||
switch suffix {
|
switch suffix {
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
|
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
|
||||||
suffixLen = 0
|
suffixLen = 0
|
||||||
multiplier = 1 << 10
|
multiplier = float64(Kibi)
|
||||||
case 'b', 'B':
|
case 'b', 'B':
|
||||||
multiplier = 1
|
if len(s) > 2 && s[len(s)-2] == 'i' {
|
||||||
case 'k', 'K':
|
suffix = s[len(s)-3]
|
||||||
multiplier = 1 << 10
|
suffixLen = 3
|
||||||
case 'm', 'M':
|
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
|
||||||
multiplier = 1 << 20
|
return errors.Errorf("bad suffix %q", suffix)
|
||||||
case 'g', 'G':
|
}
|
||||||
multiplier = 1 << 30
|
// Could also support SI form MB, and treat it equivalent to MiB, but perhaps better to reserve it for CountSuffix?
|
||||||
case 't', 'T':
|
//} else if len(s) > 1 {
|
||||||
multiplier = 1 << 40
|
// suffix = s[len(s)-2]
|
||||||
case 'p', 'P':
|
// if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); multiplierFound {
|
||||||
multiplier = 1 << 50
|
// suffixLen = 2
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
} else {
|
||||||
|
multiplier = float64(SizeSuffixBase)
|
||||||
|
}
|
||||||
|
case 'i', 'I':
|
||||||
|
if len(s) > 1 {
|
||||||
|
suffix = s[len(s)-2]
|
||||||
|
suffixLen = 2
|
||||||
|
multiplierFound, multiplier = x.multiplierFromSymbol(suffix)
|
||||||
|
}
|
||||||
|
if !multiplierFound {
|
||||||
|
return errors.Errorf("bad suffix %q", suffix)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return errors.Errorf("bad suffix %q", suffix)
|
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
|
||||||
|
return errors.Errorf("bad suffix %q", suffix)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
s = s[:len(s)-suffixLen]
|
s = s[:len(s)-suffixLen]
|
||||||
value, err := strconv.ParseFloat(s, 64)
|
value, err := strconv.ParseFloat(s, 64)
|
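A parsing sketch (hypothetical snippet, relying only on the Set behaviour exercised by the test cases further down): the plain, "i" and "iB" suffix forms are all treated as binary multiples, while the SI form MB is rejected:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	var size fs.SizeSuffix

	for _, in := range []string{"1M", "1Mi", "1MiB", "1MB"} {
		if err := size.Set(in); err != nil {
			fmt.Printf("%s: %v\n", in, err) // "1MB" is rejected with an error
			continue
		}
		fmt.Printf("%s = %d bytes\n", in, int64(size)) // 1048576 for the accepted forms
	}
}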
||||||
|
|
|
@ -27,11 +27,11 @@ func TestSizeSuffixString(t *testing.T) {
|
||||||
}{
|
}{
|
||||||
{0, "0"},
|
{0, "0"},
|
||||||
{102, "102"},
|
{102, "102"},
|
||||||
{1024, "1k"},
|
{1024, "1Ki"},
|
||||||
{1024 * 1024, "1M"},
|
{1024 * 1024, "1Mi"},
|
||||||
{1024 * 1024 * 1024, "1G"},
|
{1024 * 1024 * 1024, "1Gi"},
|
||||||
{10 * 1024 * 1024 * 1024, "10G"},
|
{10 * 1024 * 1024 * 1024, "10Gi"},
|
||||||
{10.1 * 1024 * 1024 * 1024, "10.100G"},
|
{10.1 * 1024 * 1024 * 1024, "10.100Gi"},
|
||||||
{-1, "off"},
|
{-1, "off"},
|
||||||
{-100, "off"},
|
{-100, "off"},
|
||||||
} {
|
} {
|
||||||
|
@ -41,26 +41,73 @@ func TestSizeSuffixString(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSizeSuffixUnit(t *testing.T) {
|
func TestSizeSuffixByteShortUnit(t *testing.T) {
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
in float64
|
in float64
|
||||||
want string
|
want string
|
||||||
}{
|
}{
|
||||||
{0, "0 Bytes"},
|
{0, "0 B"},
|
||||||
{102, "102 Bytes"},
|
{102, "102 B"},
|
||||||
{1024, "1 kBytes"},
|
{1024, "1 KiB"},
|
||||||
{1024 * 1024, "1 MBytes"},
|
{1024 * 1024, "1 MiB"},
|
||||||
{1024 * 1024 * 1024, "1 GBytes"},
|
{1024 * 1024 * 1024, "1 GiB"},
|
||||||
{10 * 1024 * 1024 * 1024, "10 GBytes"},
|
{10 * 1024 * 1024 * 1024, "10 GiB"},
|
||||||
{10.1 * 1024 * 1024 * 1024, "10.100 GBytes"},
|
{10.1 * 1024 * 1024 * 1024, "10.100 GiB"},
|
||||||
{10 * 1024 * 1024 * 1024 * 1024, "10 TBytes"},
|
{10 * 1024 * 1024 * 1024 * 1024, "10 TiB"},
|
||||||
{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PBytes"},
|
{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PiB"},
|
||||||
{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1024 PBytes"},
|
{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 EiB"},
|
||||||
{-1, "off"},
|
{-1, "off"},
|
||||||
{-100, "off"},
|
{-100, "off"},
|
||||||
} {
|
} {
|
||||||
ss := SizeSuffix(test.in)
|
ss := SizeSuffix(test.in)
|
||||||
got := ss.Unit("Bytes")
|
got := ss.ByteShortUnit()
|
||||||
|
assert.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSizeSuffixByteUnit(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
in float64
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{0, "0 Byte"},
|
||||||
|
{102, "102 Byte"},
|
||||||
|
{1024, "1 KiByte"},
|
||||||
|
{1024 * 1024, "1 MiByte"},
|
||||||
|
{1024 * 1024 * 1024, "1 GiByte"},
|
||||||
|
{10 * 1024 * 1024 * 1024, "10 GiByte"},
|
||||||
|
{10.1 * 1024 * 1024 * 1024, "10.100 GiByte"},
|
||||||
|
{10 * 1024 * 1024 * 1024 * 1024, "10 TiByte"},
|
||||||
|
{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 PiByte"},
|
||||||
|
{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 EiByte"},
|
||||||
|
{-1, "off"},
|
||||||
|
{-100, "off"},
|
||||||
|
} {
|
||||||
|
ss := SizeSuffix(test.in)
|
||||||
|
got := ss.ByteUnit()
|
||||||
|
assert.Equal(t, test.want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSizeSuffixBitRateUnit(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
in float64
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{0, "0 bit/s"},
|
||||||
|
{1024, "1 Kibit/s"},
|
||||||
|
{1024 * 1024, "1 Mibit/s"},
|
||||||
|
{1024 * 1024 * 1024, "1 Gibit/s"},
|
||||||
|
{10 * 1024 * 1024 * 1024, "10 Gibit/s"},
|
||||||
|
{10.1 * 1024 * 1024 * 1024, "10.100 Gibit/s"},
|
||||||
|
{10 * 1024 * 1024 * 1024 * 1024, "10 Tibit/s"},
|
||||||
|
{10 * 1024 * 1024 * 1024 * 1024 * 1024, "10 Pibit/s"},
|
||||||
|
{1 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, "1 Eibit/s"},
|
||||||
|
{-1, "off"},
|
||||||
|
{-100, "off"},
|
||||||
|
} {
|
||||||
|
ss := SizeSuffix(test.in)
|
||||||
|
got := ss.BitRateUnit()
|
||||||
assert.Equal(t, test.want, got)
|
assert.Equal(t, test.want, got)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -77,9 +124,23 @@ func TestSizeSuffixSet(t *testing.T) {
|
||||||
{"0.1k", 102, false},
|
{"0.1k", 102, false},
|
||||||
{"0.1", 102, false},
|
{"0.1", 102, false},
|
||||||
{"1K", 1024, false},
|
{"1K", 1024, false},
|
||||||
|
{"1k", 1024, false},
|
||||||
|
//{"1KB", 1024, false},
|
||||||
|
//{"1kB", 1024, false},
|
||||||
|
//{"1kb", 1024, false},
|
||||||
|
{"1KI", 1024, false},
|
||||||
|
{"1Ki", 1024, false},
|
||||||
|
{"1kI", 1024, false},
|
||||||
|
{"1ki", 1024, false},
|
||||||
|
{"1KiB", 1024, false},
|
||||||
|
{"1KiB", 1024, false},
|
||||||
|
{"1kib", 1024, false},
|
||||||
{"1", 1024, false},
|
{"1", 1024, false},
|
||||||
{"2.5", 1024 * 2.5, false},
|
{"2.5", 1024 * 2.5, false},
|
||||||
{"1M", 1024 * 1024, false},
|
{"1M", 1024 * 1024, false},
|
||||||
|
//{"1MB", 1024 * 1024, false},
|
||||||
|
{"1Mi", 1024 * 1024, false},
|
||||||
|
{"1MiB", 1024 * 1024, false},
|
||||||
{"1.g", 1024 * 1024 * 1024, false},
|
{"1.g", 1024 * 1024 * 1024, false},
|
||||||
{"10G", 10 * 1024 * 1024 * 1024, false},
|
{"10G", 10 * 1024 * 1024 * 1024, false},
|
||||||
{"10T", 10 * 1024 * 1024 * 1024 * 1024, false},
|
{"10T", 10 * 1024 * 1024 * 1024 * 1024, false},
|
||||||
|
@ -91,6 +152,9 @@ func TestSizeSuffixSet(t *testing.T) {
|
||||||
{"1.q", 0, true},
|
{"1.q", 0, true},
|
||||||
{"1q", 0, true},
|
{"1q", 0, true},
|
||||||
{"-1K", 0, true},
|
{"-1K", 0, true},
|
||||||
|
{"1i", 0, true},
|
||||||
|
{"1iB", 0, true},
|
||||||
|
{"1MB", 0, true},
|
||||||
} {
|
} {
|
||||||
ss := SizeSuffix(0)
|
ss := SizeSuffix(0)
|
||||||
err := ss.Set(test.in)
|
err := ss.Set(test.in)
|
||||||
|
|
|
@ -1792,7 +1792,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||||
minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
|
minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
maxChunkSize := 2 * fs.MebiByte
|
maxChunkSize := 2 * fs.Mebi
|
||||||
if maxChunkSize < 2*minChunkSize {
|
if maxChunkSize < 2*minChunkSize {
|
||||||
maxChunkSize = 2 * minChunkSize
|
maxChunkSize = 2 * minChunkSize
|
||||||
}
|
}
|
||||||
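Call sites like the one above simply swap fs.MebiByte for fs.Mebi; a hedged sketch of a size expression written with the renamed constant:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	var chunkSize fs.SizeSuffix = 2 * fs.Mebi // was 2 * fs.MebiByte before this change
	fmt.Println(chunkSize.ByteShortUnit())    // 2 MiB
}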
|
|
|
@ -543,7 +543,7 @@ func fillInMissingSizes(total, used, free, unknownFree int64) (newTotal, newUsed
|
||||||
return total, used, free
|
return total, used, free
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the total size isn't known then we will aim for this many bytes free (1PB)
|
// If the total size isn't known then we will aim for this many bytes free (1 PiB)
|
||||||
const unknownFreeBytes = 1 << 50
|
const unknownFreeBytes = 1 << 50
|
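For reference, 1 << 50 = 1,125,899,906,842,624 bytes, i.e. exactly 1 PiB in the binary-prefix notation used above.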
||||||
|
|
||||||
// Statfs returns info about the filing system if known
|
// Statfs returns info about the filing system if known
|
||||||
|
|
|
@ -593,7 +593,7 @@ func (dl *downloader) _stop() {
|
||||||
|
|
||||||
// stop the downloader by stopping the async reader buffering
|
// stop the downloader by stopping the async reader buffering
|
||||||
// any more input. This causes all the stuff in the async
|
// any more input. This causes all the stuff in the async
|
||||||
// buffer (which can be many MB) to be written to the disk
|
// buffer (which can be many MiB) to be written to the disk
|
||||||
// before exiting.
|
// before exiting.
|
||||||
if dl.in != nil {
|
if dl.in != nil {
|
||||||
dl.in.StopBuffering()
|
dl.in.StopBuffering()
|
||||||
|
|
|
@ -51,13 +51,13 @@ var DefaultOpt = Options{
|
||||||
CacheMode: CacheModeOff,
|
CacheMode: CacheModeOff,
|
||||||
CacheMaxAge: 3600 * time.Second,
|
CacheMaxAge: 3600 * time.Second,
|
||||||
CachePollInterval: 60 * time.Second,
|
CachePollInterval: 60 * time.Second,
|
||||||
ChunkSize: 128 * fs.MebiByte,
|
ChunkSize: 128 * fs.Mebi,
|
||||||
ChunkSizeLimit: -1,
|
ChunkSizeLimit: -1,
|
||||||
CacheMaxSize: -1,
|
CacheMaxSize: -1,
|
||||||
CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
|
CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
|
||||||
WriteWait: 1000 * time.Millisecond,
|
WriteWait: 1000 * time.Millisecond,
|
||||||
ReadWait: 20 * time.Millisecond,
|
ReadWait: 20 * time.Millisecond,
|
||||||
WriteBack: 5 * time.Second,
|
WriteBack: 5 * time.Second,
|
||||||
ReadAhead: 0 * fs.MebiByte,
|
ReadAhead: 0 * fs.Mebi,
|
||||||
UsedIsSize: false,
|
UsedIsSize: false,
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue
Block a user