Mirror of https://github.com/rclone/rclone.git (synced 2024-11-25 09:41:44 +08:00)

build: fix problems found with unconvert

commit d966cef14c
parent a551978a3f
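The hunks below all come from the unconvert linter, which reports expressions that convert a value to the type it already has. A minimal sketch of the pattern being removed, with hypothetical names (size is not a function from rclone), assuming the tool is run over the tree with something like `unconvert ./...`:

    package main

    import "fmt"

    // size returns an int64, so converting its result to int64 again is a
    // no-op that unconvert reports as an unnecessary conversion.
    func size() int64 { return 42 }

    func main() {
        before := int64(size()) // reported: unnecessary conversion
        after := size()         // fixed: conversion dropped, same type and value
        fmt.Println(before, after)
    }

Most hunks in this commit simply drop such a wrapper; two keep it and add a nolint marker because the conversion is only redundant on some platforms (see the local backend hunks further down).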
@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
 wantEntry := test.entries[i]

 require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
+require.Equal(t, wantEntry.size, gotEntry.Size(), what)
 _, isDir := gotEntry.(fs.Directory)
 require.Equal(t, wantEntry.isDir, isDir, what)
 }
@@ -77,7 +77,7 @@ func init() {
 }, {
 Name: "upload_cutoff",
 Help: "Cutoff for switching to chunked upload (<= 256MB).",
-Default: fs.SizeSuffix(defaultUploadCutoff),
+Default: defaultUploadCutoff,
 Advanced: true,
 }, {
 Name: "chunk_size",
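The Default changes in this and the other option hunks share one shape: the default constants are evidently already declared with type fs.SizeSuffix, so wrapping them in fs.SizeSuffix(...) converts a value to its own type. A hedged sketch of that shape; the constant declaration, its 256MB value, and the current rclone import path are assumptions rather than lines copied from this commit:

    package example

    import "github.com/rclone/rclone/fs"

    // Assumed declaration: the constant already carries the fs.SizeSuffix type,
    // so fs.SizeSuffix(defaultUploadCutoff) would be a self-conversion.
    const defaultUploadCutoff = fs.SizeSuffix(256 * 1024 * 1024)

    // uploadCutoffOption mirrors the option literal in the hunk above.
    var uploadCutoffOption = fs.Option{
        Name:     "upload_cutoff",
        Help:     "Cutoff for switching to chunked upload (<= 256MB).",
        Default:  defaultUploadCutoff, // was fs.SizeSuffix(defaultUploadCutoff)
        Advanced: true,
    }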
@@ -85,7 +85,7 @@ func init() {

 Note that this is stored in memory and there may be up to
 "--transfers" chunks stored at once in memory.`,
-Default: fs.SizeSuffix(defaultChunkSize),
+Default: defaultChunkSize,
 Advanced: true,
 }, {
 Name: "list_chunk",
@@ -1037,7 +1037,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
 o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
 o.mimeType = info.ContentType()
 o.size = size
-o.modTime = time.Time(info.LastModified())
+o.modTime = info.LastModified()
 o.accessTier = azblob.AccessTierType(info.AccessTier())
 o.setMetadata(metadata)
@@ -108,7 +108,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
 Files above this size will be uploaded in chunks of "--b2-chunk-size".

 This value should be set no larger than 4.657GiB (== 5GB).`,
-Default: fs.SizeSuffix(defaultUploadCutoff),
+Default: defaultUploadCutoff,
 Advanced: true,
 }, {
 Name: "chunk_size",
@@ -118,7 +118,7 @@ When uploading large files, chunk the file into this size. Note that
 these chunks are buffered in memory and there might a maximum of
 "--transfers" chunks in progress at once. 5,000,000 Bytes is the
 minimim size.`,
-Default: fs.SizeSuffix(defaultChunkSize),
+Default: defaultChunkSize,
 Advanced: true,
 }, {
 Name: "disable_checksum",
@@ -211,8 +211,8 @@ outer:
 }

 reqSize := remaining
-if reqSize >= int64(chunkSize) {
-reqSize = int64(chunkSize)
+if reqSize >= chunkSize {
+reqSize = chunkSize
 }

 // Make a block of memory
backend/cache/cache_internal_test.go | 4 (vendored)
@@ -387,10 +387,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

 // write the object
 o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
-require.Equal(t, o.Size(), int64(testSize))
+require.Equal(t, o.Size(), testSize)
 time.Sleep(time.Second * 3)

-checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
+checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 require.NoError(t, err)
 require.Equal(t, int64(len(checkSample)), o.Size())
backend/cache/storage_persistent.go | 6 (vendored)
@@ -398,7 +398,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
 if err != nil {
 return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
 }
-err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
+err = bucket.Put([]byte(cachedObject.Name), encoded)
 if err != nil {
 return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
 }
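In the three storage_persistent.go hunks, encoded presumably comes from a json.Marshal call just above the context shown, and json.Marshal already returns []byte, so []byte(encoded) was another self-conversion; a bolt-style Bucket.Put takes both key and value as []byte. A small illustrative sketch of the same shape, using boltdb/bolt and an invented cacheObjectInfo struct (neither is copied from the cache backend):

    package example

    import (
        "encoding/json"

        "github.com/boltdb/bolt"
    )

    // cacheObjectInfo is an invented stand-in for the cached object metadata.
    type cacheObjectInfo struct {
        Name string
        Size int64
    }

    // putObject stores the JSON-encoded info under its name. json.Marshal
    // already returns []byte, so the old []byte(encoded) wrapper was a no-op;
    // only the string key still needs an explicit []byte conversion.
    func putObject(db *bolt.DB, info cacheObjectInfo) error {
        encoded, err := json.Marshal(info)
        if err != nil {
            return err
        }
        return db.Update(func(tx *bolt.Tx) error {
            bucket, err := tx.CreateBucketIfNotExists([]byte("objects"))
            if err != nil {
                return err
            }
            return bucket.Put([]byte(info.Name), encoded) // was []byte(encoded)
        })
    }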
@@ -809,7 +809,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 if err != nil {
 return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 }
-err = bucket.Put([]byte(destPath), []byte(encoded))
+err = bucket.Put([]byte(destPath), encoded)
 if err != nil {
 return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 }
@@ -1049,7 +1049,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 if err != nil {
 return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 }
-err = bucket.Put([]byte(destPath), []byte(encoded))
+err = bucket.Put([]byte(destPath), encoded)
 if err != nil {
 return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 }
@@ -463,7 +463,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
 if int(newRune) < base {
 newRune += 256
 }
-_, _ = result.WriteRune(rune(newRune))
+_, _ = result.WriteRune(newRune)

 default:
 _, _ = result.WriteRune(runeValue)
@@ -130,8 +130,8 @@ Any files larger than this will be uploaded in chunks of this size.
 Note that chunks are buffered in memory (one at a time) so rclone can
 deal with retries. Setting this larger will increase the speed
 slightly (at most 10%% for 128MB in tests) at the cost of using more
-memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
-Default: fs.SizeSuffix(defaultChunkSize),
+memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
+Default: defaultChunkSize,
 Advanced: true,
 }, {
 Name: "impersonate",
@@ -1006,7 +1006,7 @@ func (o *Object) MimeType() string {
 // setMetaData sets the metadata from info
 func (o *Object) setMetaData(info *api.JottaFile) (err error) {
 o.hasMetaData = true
-o.size = int64(info.Size)
+o.size = info.Size
 o.md5 = info.MD5
 o.mimeType = info.MimeType
 o.modTime = time.Time(info.ModifiedAt)
@@ -1212,7 +1212,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

 // finally update the meta data
 o.hasMetaData = true
-o.size = int64(result.Bytes)
+o.size = result.Bytes
 o.md5 = result.Md5
 o.modTime = time.Unix(result.Modified/1000, 0)
 } else {
@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
 if err != nil {
 return nil, errors.Wrap(err, "failed to read disk usage")
 }
-bs := int64(s.Bsize)
+bs := int64(s.Bsize) // nolint: unconvert
 usage := &fs.Usage{
 Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
 Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
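This hunk keeps the conversion and silences the linter instead of dropping it: on the platform unconvert was run on, syscall.Statfs_t.Bsize is presumably already int64, but the field has other integer widths on other Unix variants, so removing int64(...) would break those builds. A hedged sketch of the portability pattern; the function below is illustrative, not rclone's:

    //go:build linux || darwin

    package example

    import "syscall"

    // diskTotalBytes returns block size times block count for the filesystem
    // at path. Statfs_t.Bsize is int64 on Linux but a narrower unsigned type
    // on Darwin, so the conversion stays even where unconvert calls it redundant.
    func diskTotalBytes(path string) (int64, error) {
        var s syscall.Statfs_t
        if err := syscall.Statfs(path, &s); err != nil {
            return 0, err
        }
        bs := int64(s.Bsize) // nolint: unconvert
        return bs * int64(s.Blocks), nil
    }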
@@ -22,5 +22,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
 fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
 return devUnset
 }
-return uint64(statT.Dev)
+return uint64(statT.Dev) // nolint: unconvert
 }
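Same reasoning here: Stat_t.Dev is already uint64 on Linux, which is what unconvert noticed, but it is a different integer type elsewhere (int32 on Darwin, for example), so the conversion is kept behind a nolint comment. A minimal sketch, with devUnset's value and the one-argument signature as illustrative simplifications:

    //go:build linux || darwin

    package example

    import (
        "os"
        "syscall"
    )

    // devUnset is an illustrative sentinel for "device number unknown".
    const devUnset uint64 = 0xdeadbeefcafebabe

    // readDevice returns the device number of the file, or devUnset when the
    // platform-specific Sys() value is not a *syscall.Stat_t. Dev is uint64 on
    // Linux but not on every platform, hence the kept conversion.
    func readDevice(fi os.FileInfo) uint64 {
        statT, ok := fi.Sys().(*syscall.Stat_t)
        if !ok {
            return devUnset
        }
        return uint64(statT.Dev) // nolint: unconvert
    }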
@@ -785,7 +785,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 remote := path.Join(dir, folder.Name)
 // cache the directory ID for later lookups
 f.dirCache.Put(remote, folder.FolderID)
-d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
+d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
 d.SetItems(int64(folder.ChildFolders))
 entries = append(entries, d)
 }
@@ -143,7 +143,7 @@ func (u *uploader) init() {

 // Try to adjust partSize if it is too small and account for
 // integer division truncation.
-if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
+if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
 // Add one to the part size to account for remainders
 // during the size calculation. e.g odd number of bytes.
 u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
@@ -307,7 +307,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs
 if err != nil {
 return nil, errors.Wrap(err, "error parsing time in directory item")
 }
-d := fs.NewDir(remote, t).SetSize(int64(object.Size))
+d := fs.NewDir(remote, t).SetSize(object.Size)
 return d, nil
 case "file":
 o, err := f.newObjectWithInfo(remote, object)
@@ -456,7 +456,7 @@ func AddBackendFlags() {
 help = help[:nl]
 }
 help = strings.TrimSpace(help)
-flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
+flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
 if _, isBool := opt.Default.(bool); isBool {
 flag.NoOptDefVal = "true"
 }
@@ -45,7 +45,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 if err != nil {
 return translateError(err)
 }
-resp.Size = int(n)
+resp.Size = n
 return nil
 }
@@ -158,7 +158,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
 }, nil
 case "Browse":
 var browse browse
-if err := xml.Unmarshal([]byte(argsXML), &browse); err != nil {
+if err := xml.Unmarshal(argsXML, &browse); err != nil {
 return nil, err
 }
 obj, err := cds.objectFromID(browse.ObjectID)
@@ -179,7 +179,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
 }
 return
 }():]
-if browse.RequestedCount != 0 && int(browse.RequestedCount) < len(objs) {
+if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
 objs = objs[:browse.RequestedCount]
 }
 result, err := xml.Marshal(objs)
@@ -255,7 +255,7 @@ func (h Set) GetOne() Type {
 i++
 v >>= 1
 }
-return Type(None)
+return None
 }

 // Array returns an array of all hash types in the set
@@ -100,5 +100,5 @@ func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
 // NewRepeatableLimitReaderBuffer create new repeatable reader from
 // Reader r and buf wrapped in a io.LimitReader to read only size.
 func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
-return NewRepeatableReaderBuffer(io.LimitReader(r, int64(size)), buf)
+return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
 }