vfs: convert time.Duration option to fs.Duration
commit fc1d8dafd5 (parent 2c57fe9826)
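This commit converts the VFS duration options from Go's time.Duration to rclone's fs.Duration, a defined type over time.Duration that rclone uses for durations settable from strings (command-line flags and the rc API). Because it is a distinct type, literals are now wrapped in fs.Duration(...) and every call site that hands the value to the standard library converts back with time.Duration(...). The conversion is free at runtime, since both types are nanosecond counts. A minimal sketch of the pattern, using a stand-in Duration type rather than rclone's actual fs.Duration:

package main

import (
    "fmt"
    "time"
)

// Duration stands in for rclone's fs.Duration: a defined type over
// time.Duration, so conversion in both directions is a plain type
// conversion and costs nothing at runtime.
type Duration time.Duration

func (d Duration) String() string { return time.Duration(d).String() }

func main() {
    // Storing a value: wrap the time.Duration literal, as DefaultOpt now does.
    dirCacheTime := Duration(5 * 60 * time.Second)

    // Using a value: convert back for standard-library APIs.
    timer := time.AfterFunc(time.Duration(dirCacheTime*2), func() {})
    defer timer.Stop()

    fmt.Println(dirCacheTime)                          // 5m0s
    fmt.Println(time.Duration(dirCacheTime).Seconds()) // 300
}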
backend/cache/cache_internal_test.go (vendored, 8 changed lines)
@@ -338,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 
 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
     id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-    vfsflags.Opt.DirCacheTime = time.Second
+    vfsflags.Opt.DirCacheTime = fs.Duration(time.Second)
     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
     if runInstance.rootIsCrypt {
         t.Skip("test skipped with crypt remote")
@@ -368,7 +368,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 
 func TestInternalLargeWrittenContentMatches(t *testing.T) {
     id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-    vfsflags.Opt.DirCacheTime = time.Second
+    vfsflags.Opt.DirCacheTime = fs.Duration(time.Second)
     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
     if runInstance.rootIsCrypt {
         t.Skip("test skipped with crypt remote")
@@ -708,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
     id := fmt.Sprintf("tieer%v", time.Now().Unix())
-    vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
+    vfsflags.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
     cfs, err := runInstance.getCacheFs(rootFs)
     require.NoError(t, err)
@@ -743,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }
 
 func TestInternalBug2117(t *testing.T) {
-    vfsflags.Opt.DirCacheTime = time.Second * 10
+    vfsflags.Opt.DirCacheTime = fs.Duration(time.Second * 10)
 
     id := fmt.Sprintf("tib2117%v", time.Now().Unix())
     rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@@ -52,7 +52,7 @@ func findOption(name string, options []string) (found bool) {
 func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
     // Options
     options = []string{
-        "-o", fmt.Sprintf("attr_timeout=%g", opt.AttrTimeout.Seconds()),
+        "-o", fmt.Sprintf("attr_timeout=%g", time.Duration(opt.AttrTimeout).Seconds()),
     }
     if opt.DebugFUSE {
         options = append(options, "-o", "debug")
@@ -79,7 +79,7 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
     // WinFSP so cmount must work with or without it.
     options = append(options, "-o", "atomic_o_trunc")
     if opt.DaemonTimeout != 0 {
-        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
+        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
     }
     if opt.AllowOther {
         options = append(options, "-o", "allow_other")
@@ -231,9 +231,9 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
     case "vfs-cache-mode":
         err = getFVarP(&vfsOpt.CacheMode, opt, key)
     case "vfs-cache-poll-interval":
-        vfsOpt.CachePollInterval, err = opt.GetDuration(key)
+        vfsOpt.CachePollInterval, err = opt.GetFsDuration(key)
     case "vfs-cache-max-age":
-        vfsOpt.CacheMaxAge, err = opt.GetDuration(key)
+        vfsOpt.CacheMaxAge, err = opt.GetFsDuration(key)
     case "vfs-cache-max-size":
         err = getFVarP(&vfsOpt.CacheMaxSize, opt, key)
     case "vfs-read-chunk-size":
@@ -243,11 +243,11 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
     case "vfs-case-insensitive":
         vfsOpt.CaseInsensitive, err = opt.GetBool(key)
     case "vfs-write-wait":
-        vfsOpt.WriteWait, err = opt.GetDuration(key)
+        vfsOpt.WriteWait, err = opt.GetFsDuration(key)
     case "vfs-read-wait":
-        vfsOpt.ReadWait, err = opt.GetDuration(key)
+        vfsOpt.ReadWait, err = opt.GetFsDuration(key)
     case "vfs-write-back":
-        vfsOpt.WriteBack, err = opt.GetDuration(key)
+        vfsOpt.WriteBack, err = opt.GetFsDuration(key)
     case "vfs-read-ahead":
         err = getFVarP(&vfsOpt.ReadAhead, opt, key)
     case "vfs-used-is-size":
@@ -259,9 +259,9 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
     case "no-checksum":
         vfsOpt.NoChecksum, err = opt.GetBool(key)
     case "dir-cache-time":
-        vfsOpt.DirCacheTime, err = opt.GetDuration(key)
+        vfsOpt.DirCacheTime, err = opt.GetFsDuration(key)
     case "poll-interval":
-        vfsOpt.PollInterval, err = opt.GetDuration(key)
+        vfsOpt.PollInterval, err = opt.GetFsDuration(key)
     case "read-only":
         vfsOpt.ReadOnly, err = opt.GetBool(key)
     case "dir-perms":
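On the rc side the same options are now read with opt.GetFsDuration instead of opt.GetDuration, so they land in the fs.Duration fields directly. rc.Params is a loosely-typed map decoded from JSON, so any such getter has to cope with more than one wire type. The helper below is a hypothetical stand-in sketched for illustration, not rclone's GetFsDuration; its name and the set of accepted types are assumptions:

package main

import (
    "fmt"
    "time"
)

// Duration stands in for rclone's fs.Duration.
type Duration time.Duration

// Params mirrors the shape of rc.Params: a loosely-typed map decoded from JSON.
type Params map[string]interface{}

// getFsDuration is a hypothetical stand-in for opt.GetFsDuration; the
// accepted wire types here are assumptions for illustration.
func getFsDuration(in Params, key string) (Duration, error) {
    v, ok := in[key]
    if !ok {
        return 0, fmt.Errorf("key %q not found", key)
    }
    switch x := v.(type) {
    case string: // e.g. "5m" as passed on the command line or in an rc call
        d, err := time.ParseDuration(x)
        return Duration(d), err
    case float64: // JSON numbers decode to float64; treat as seconds
        return Duration(time.Duration(x * float64(time.Second))), nil
    default:
        return 0, fmt.Errorf("key %q: unsupported type %T", key, v)
    }
}

func main() {
    in := Params{"dir-cache-time": "5m", "poll-interval": 60.0}
    for _, key := range []string{"dir-cache-time", "poll-interval"} {
        d, err := getFsDuration(in, key)
        fmt.Println(key, time.Duration(d), err) // 5m0s / 1m0s
    }
}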
vfs/dir.go (10 changed lines)
@@ -66,7 +66,7 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
         inode: newInode(),
         items: make(map[string]Node),
     }
-    d.cleanupTimer = time.AfterFunc(vfs.Opt.DirCacheTime*2, d.cacheCleanup)
+    d.cleanupTimer = time.AfterFunc(time.Duration(vfs.Opt.DirCacheTime*2), d.cacheCleanup)
     d.setHasVirtual(false)
     return d
 }
@@ -357,7 +357,7 @@ func (d *Dir) _age(when time.Time) (age time.Duration, stale bool) {
         return age, true
     }
     age = when.Sub(d.read)
-    stale = age > d.vfs.Opt.DirCacheTime
+    stale = age > time.Duration(d.vfs.Opt.DirCacheTime)
     return
 }
 
@@ -562,7 +562,7 @@ func (d *Dir) _readDir() error {
     }
 
     d.read = when
-    d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
+    d.cleanupTimer.Reset(time.Duration(d.vfs.Opt.DirCacheTime * 2))
 
     return nil
 }
@@ -745,7 +745,7 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
             dir.read = time.Time{}
         } else {
             dir.read = when
-            dir.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
+            dir.cleanupTimer.Reset(time.Duration(d.vfs.Opt.DirCacheTime * 2))
         }
     }
     dir.mu.Unlock()
@@ -783,7 +783,7 @@ func (d *Dir) readDirTree() error {
     }
     fs.Debugf(d.path, "Reading directory tree done in %s", time.Since(when))
     d.read = when
-    d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
+    d.cleanupTimer.Reset(time.Duration(d.vfs.Opt.DirCacheTime * 2))
     return nil
 }
 
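The dir.go hunks show the main cost of the defined type: Go will not mix fs.Duration and time.Duration in one expression, so the staleness comparison and the timer calls need an explicit conversion at the point of use. Note that vfs.Opt.DirCacheTime * 2 multiplies first (an untyped constant works with either type) and converts the product once. A small sketch of the same checks with a stand-in Duration type:

package main

import (
    "fmt"
    "time"
)

// Duration stands in for rclone's fs.Duration.
type Duration time.Duration

func main() {
    dirCacheTime := Duration(5 * time.Minute)
    read := time.Now().Add(-10 * time.Minute) // pretend the listing is 10 minutes old

    age := time.Since(read) // a time.Duration

    // `age > dirCacheTime` would not compile: mismatched types
    // time.Duration and Duration. Convert at the use site instead.
    stale := age > time.Duration(dirCacheTime)

    // Multiplying by an untyped constant stays within the defined type,
    // so convert once after the arithmetic, as dir.go does.
    cleanupAfter := time.Duration(dirCacheTime * 2)

    fmt.Println(stale, cleanupAfter) // true 10m0s
}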
@@ -279,7 +279,7 @@ func getStatus(vfs *VFS, in rc.Params) (out rc.Params, err error) {
         "supported": vfs.pollChan != nil,
         "interval": map[string]interface{}{
             "raw":     vfs.Opt.PollInterval,
-            "seconds": vfs.Opt.PollInterval / time.Second,
+            "seconds": time.Duration(vfs.Opt.PollInterval) / time.Second,
             "string":  vfs.Opt.PollInterval.String(),
         },
     }, nil
@@ -347,7 +347,7 @@ func rcPollInterval(ctx context.Context, in rc.Params) (out rc.Params, err error
     }
     select {
     case vfs.pollChan <- interval:
-        vfs.Opt.PollInterval = interval
+        vfs.Opt.PollInterval = fs.Duration(interval)
     case <-timeoutChan:
         timeoutHit = true
     }
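Two idioms for extracting seconds from one of these options appear in this commit: float seconds via .Seconds() after conversion (the attr_timeout and daemon_timeout mount options earlier), and whole seconds via integer division by time.Second (the rc status map above). A sketch contrasting them, with the same stand-in type:

package main

import (
    "fmt"
    "time"
)

// Duration stands in for rclone's fs.Duration.
type Duration time.Duration

func main() {
    attrTimeout := Duration(1500 * time.Millisecond)

    // Float seconds, as in fmt.Sprintf("attr_timeout=%g", ...).
    fmt.Printf("attr_timeout=%g\n", time.Duration(attrTimeout).Seconds()) // attr_timeout=1.5

    // Whole seconds via integer division, as in the rc status map.
    secs := int64(time.Duration(attrTimeout) / time.Second)
    fmt.Println("seconds:", secs) // seconds: 1
}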
@@ -268,7 +268,7 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
         maxBuf = len(p)
     }
     if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) {
-        waitSequential("read", fh.remote, &fh.cond, fh.file.VFS().Opt.ReadWait, &fh.offset, off)
+        waitSequential("read", fh.remote, &fh.cond, time.Duration(fh.file.VFS().Opt.ReadWait), &fh.offset, off)
     }
     doSeek := off != fh.offset
     if doSeek && fh.noSeek {
@@ -722,7 +722,7 @@ func TestRWCacheUpdate(t *testing.T) {
     opt := vfscommon.DefaultOpt
     opt.CacheMode = vfscommon.CacheModeFull
     opt.WriteBack = writeBackDelay
-    opt.DirCacheTime = 100 * time.Millisecond
+    opt.DirCacheTime = fs.Duration(100 * time.Millisecond)
     r, vfs := newTestVFSOpt(t, &opt)
 
     if r.Fremote.Precision() == fs.ModTimeNotSupported {
@@ -744,7 +744,7 @@ func TestRWCacheUpdate(t *testing.T) {
     r.CheckRemoteItems(t, file1)
 
     // Wait for directory cache to expire
-    time.Sleep(2 * opt.DirCacheTime)
+    time.Sleep(time.Duration(2 * opt.DirCacheTime))
 
     // Check the file is OK via the VFS
     data, err := vfs.ReadFile(filename)
@@ -232,7 +232,7 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
     if do := features.ChangeNotify; do != nil {
         vfs.pollChan = make(chan time.Duration)
         do(context.TODO(), vfs.root.changeNotify, vfs.pollChan)
-        vfs.pollChan <- vfs.Opt.PollInterval
+        vfs.pollChan <- time.Duration(vfs.Opt.PollInterval)
     } else if vfs.Opt.PollInterval > 0 {
         fs.Infof(f, "poll-interval is not supported by this remote")
     }
@@ -608,7 +608,7 @@ func (vfs *VFS) Statfs() (total, used, free int64) {
     defer vfs.usageMu.Unlock()
     total, used, free = -1, -1, -1
     doAbout := vfs.f.Features().About
-    if (doAbout != nil || vfs.Opt.UsedIsSize) && (vfs.usageTime.IsZero() || time.Since(vfs.usageTime) >= vfs.Opt.DirCacheTime) {
+    if (doAbout != nil || vfs.Opt.UsedIsSize) && (vfs.usageTime.IsZero() || time.Since(vfs.usageTime) >= time.Duration(vfs.Opt.DirCacheTime)) {
         var err error
         ctx := context.TODO()
         if doAbout == nil {
@@ -28,7 +28,7 @@ var (
 
 // Constants uses in the tests
 const (
-    writeBackDelay      = 100 * time.Millisecond // A short writeback delay for testing
+    writeBackDelay      = fs.Duration(100 * time.Millisecond) // A short writeback delay for testing
     waitForWritersDelay = 30 * time.Second // time to wait for existing writers
 )
 
@@ -779,7 +779,7 @@ func (c *Cache) clean(kicked bool) {
     c.mu.Unlock()
 
     // Remove any files that are over age
-    c.purgeOld(c.opt.CacheMaxAge)
+    c.purgeOld(time.Duration(c.opt.CacheMaxAge))
 
     // If have a maximum cache size...
     if c.haveQuotas() {
@@ -830,7 +830,7 @@ func (c *Cache) cleaner(ctx context.Context) {
     // Start cleaning the cache immediately
     c.clean(false)
     // Then every interval specified
-    timer := time.NewTicker(c.opt.CachePollInterval)
+    timer := time.NewTicker(time.Duration(c.opt.CachePollInterval))
     defer timer.Stop()
     for {
         select {
@@ -628,11 +628,11 @@ func TestCacheRename(t *testing.T) {
 
 func TestCacheCleaner(t *testing.T) {
     opt := vfscommon.DefaultOpt
-    opt.CachePollInterval = 10 * time.Millisecond
-    opt.CacheMaxAge = 20 * time.Millisecond
+    opt.CachePollInterval = fs.Duration(10 * time.Millisecond)
+    opt.CacheMaxAge = fs.Duration(20 * time.Millisecond)
     _, c := newTestCacheOpt(t, opt)
 
-    time.Sleep(2 * opt.CachePollInterval)
+    time.Sleep(time.Duration(2 * opt.CachePollInterval))
 
     potato := c.Item("potato")
     potato2, found := c.get("potato")
@@ -640,7 +640,7 @@ func TestCacheCleaner(t *testing.T) {
     assert.True(t, found)
 
     for i := 0; i < 100; i++ {
-        time.Sleep(10 * opt.CachePollInterval)
+        time.Sleep(time.Duration(10 * opt.CachePollInterval))
         potato2, found = c.get("potato")
         if !found {
             break
@@ -126,7 +126,7 @@ func (ws *writeBackItems) _update(item *writeBackItem, expiry time.Time) {
 func (wb *WriteBack) _newExpiry() time.Time {
     expiry := time.Now()
     if wb.opt.WriteBack > 0 {
-        expiry = expiry.Add(wb.opt.WriteBack)
+        expiry = expiry.Add(time.Duration(wb.opt.WriteBack))
     }
     // expiry = expiry.Round(time.Millisecond)
     return expiry
@@ -140,7 +140,7 @@ func (wb *WriteBack) _newItem(id Handle, name string) *writeBackItem {
     wbItem := &writeBackItem{
         name:   name,
         expiry: wb._newExpiry(),
-        delay:  wb.opt.WriteBack,
+        delay:  time.Duration(wb.opt.WriteBack),
         id:     id,
     }
     wb._addItem(wbItem)
@@ -368,7 +368,7 @@ func (wb *WriteBack) upload(ctx context.Context, wbItem *writeBackItem) {
     if errors.Is(err, context.Canceled) {
         fs.Infof(wbItem.name, "vfs cache: upload canceled")
         // Upload was cancelled so reset timer
-        wbItem.delay = wb.opt.WriteBack
+        wbItem.delay = time.Duration(wb.opt.WriteBack)
     } else {
         fs.Errorf(wbItem.name, "vfs cache: failed to upload try #%d, will retry in %v: %v", wbItem.tries, wbItem.delay, err)
     }
@@ -18,7 +18,7 @@ import (
 func newTestWriteBack(t *testing.T) (wb *WriteBack, cancel func()) {
     ctx, cancel := context.WithCancel(context.Background())
     opt := vfscommon.DefaultOpt
-    opt.WriteBack = 100 * time.Millisecond
+    opt.WriteBack = fs.Duration(100 * time.Millisecond)
     wb = New(ctx, &opt)
     return wb, cancel
 }
@@ -14,9 +14,9 @@ type Options struct {
     NoChecksum   bool // don't check checksums if set
     ReadOnly     bool // if set VFS is read only
     NoModTime    bool // don't read mod times for files
-    DirCacheTime time.Duration // how long to consider directory listing cache valid
+    DirCacheTime fs.Duration // how long to consider directory listing cache valid
     Refresh      bool // refreshes the directory listing recursively on start
-    PollInterval time.Duration
+    PollInterval fs.Duration
     Umask        int
     UID          uint32
     GID          uint32
@@ -25,15 +25,15 @@ type Options struct {
     ChunkSize         fs.SizeSuffix // if > 0 read files in chunks
     ChunkSizeLimit    fs.SizeSuffix // if > ChunkSize double the chunk size after each chunk until reached
     CacheMode         CacheMode
-    CacheMaxAge       time.Duration
+    CacheMaxAge       fs.Duration
     CacheMaxSize      fs.SizeSuffix
     CacheMinFreeSpace fs.SizeSuffix
-    CachePollInterval time.Duration
+    CachePollInterval fs.Duration
     CaseInsensitive   bool
     BlockNormDupes    bool
-    WriteWait         time.Duration // time to wait for in-sequence write
-    ReadWait          time.Duration // time to wait for in-sequence read
-    WriteBack         time.Duration // time to wait before writing back dirty files
+    WriteWait         fs.Duration // time to wait for in-sequence write
+    ReadWait          fs.Duration // time to wait for in-sequence read
+    WriteBack         fs.Duration // time to wait before writing back dirty files
     ReadAhead         fs.SizeSuffix // bytes to read ahead in cache mode "full"
     UsedIsSize        bool // if true, use the `rclone size` algorithm for Used size
     FastFingerprint   bool // if set use fast fingerprints
@@ -45,9 +45,9 @@ var DefaultOpt = Options{
     NoModTime:    false,
     NoChecksum:   false,
     NoSeek:       false,
-    DirCacheTime: 5 * 60 * time.Second,
+    DirCacheTime: fs.Duration(5 * 60 * time.Second),
     Refresh:      false,
-    PollInterval: time.Minute,
+    PollInterval: fs.Duration(time.Minute),
     ReadOnly:     false,
     Umask:        0,
     UID:          ^uint32(0), // these values instruct WinFSP-FUSE to use the current user
@@ -55,16 +55,16 @@ var DefaultOpt = Options{
     DirPerms:          os.FileMode(0777),
     FilePerms:         os.FileMode(0666),
     CacheMode:         CacheModeOff,
-    CacheMaxAge:       3600 * time.Second,
-    CachePollInterval: 60 * time.Second,
+    CacheMaxAge:       fs.Duration(3600 * time.Second),
+    CachePollInterval: fs.Duration(60 * time.Second),
     ChunkSize:         128 * fs.Mebi,
     ChunkSizeLimit:    -1,
     CacheMaxSize:      -1,
     CacheMinFreeSpace: -1,
     CaseInsensitive:   runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
-    WriteWait:         1000 * time.Millisecond,
-    ReadWait:          20 * time.Millisecond,
-    WriteBack:         5 * time.Second,
+    WriteWait:         fs.Duration(1000 * time.Millisecond),
+    ReadWait:          fs.Duration(20 * time.Millisecond),
+    WriteBack:         fs.Duration(5 * time.Second),
     ReadAhead:         0 * fs.Mebi,
     UsedIsSize:        false,
     DiskSpaceTotalSize: -1,
@@ -21,13 +21,13 @@ func AddFlags(flagSet *pflag.FlagSet) {
     flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up)", "VFS")
     flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download", "VFS")
     flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files", "VFS")
-    flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for", "VFS")
+    flags.FVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", "Time to cache directory entries for", "VFS")
     flags.BoolVarP(flagSet, &Opt.Refresh, "vfs-refresh", "", Opt.Refresh, "Refreshes the directory cache recursively in the background on start", "VFS")
-    flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable)", "VFS")
+    flags.FVarP(flagSet, &Opt.PollInterval, "poll-interval", "", "Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable)", "VFS")
     flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Only allow read-only access", "VFS")
     flags.FVarP(flagSet, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full", "VFS")
-    flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects", "VFS")
-    flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max time since last access of objects in the cache", "VFS")
+    flags.FVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", "Interval to poll the cache for stale objects", "VFS")
+    flags.FVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", "Max time since last access of objects in the cache", "VFS")
     flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache", "VFS")
     flags.FVarP(flagSet, &Opt.CacheMinFreeSpace, "vfs-cache-min-free-space", "", "Target minimum free space on the disk containing the cache", "VFS")
     flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks", "VFS")
@@ -36,9 +36,9 @@ func AddFlags(flagSet *pflag.FlagSet) {
     flags.FVarP(flagSet, FilePerms, "file-perms", "", "File permissions", "VFS")
     flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match", "VFS")
     flags.BoolVarP(flagSet, &Opt.BlockNormDupes, "vfs-block-norm-dupes", "", Opt.BlockNormDupes, "If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)", "VFS")
-    flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error", "VFS")
-    flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking", "VFS")
-    flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-write-back", "", Opt.WriteBack, "Time to writeback files after last use when using cache", "VFS")
+    flags.FVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", "Time to wait for in-sequence write before giving error", "VFS")
+    flags.FVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", "Time to wait for in-sequence read before seeking", "VFS")
+    flags.FVarP(flagSet, &Opt.WriteBack, "vfs-write-back", "", "Time to writeback files after last use when using cache", "VFS")
    flags.FVarP(flagSet, &Opt.ReadAhead, "vfs-read-ahead", "", "Extra read ahead over --buffer-size when using cache-mode full", "VFS")
     flags.BoolVarP(flagSet, &Opt.UsedIsSize, "vfs-used-is-size", "", Opt.UsedIsSize, "Use the `rclone size` algorithm for Used size", "VFS")
     flags.BoolVarP(flagSet, &Opt.FastFingerprint, "vfs-fast-fingerprint", "", Opt.FastFingerprint, "Use fast (less accurate) fingerprints for change detection", "VFS")
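flags.DurationVarP only accepts a *time.Duration, so the converted fields register through flags.FVarP instead, which takes any value implementing the pflag-style String/Set/Type methods; that is also what lets these flags accept rclone's extended duration syntax (day and week suffixes, "off", and so on). The sketch below reproduces the mechanism with spf13/pflag directly, using a simplified stand-in Duration that only parses what time.ParseDuration accepts:

package main

import (
    "fmt"
    "time"

    "github.com/spf13/pflag"
)

// Duration is a simplified stand-in for fs.Duration implementing pflag.Value.
type Duration time.Duration

func (d *Duration) String() string { return time.Duration(*d).String() }

func (d *Duration) Set(s string) error {
    parsed, err := time.ParseDuration(s)
    if err != nil {
        return err
    }
    *d = Duration(parsed)
    return nil
}

func (d *Duration) Type() string { return "Duration" }

func main() {
    opt := Duration(5 * time.Minute) // default, like DefaultOpt.DirCacheTime
    flagSet := pflag.NewFlagSet("vfs", pflag.ContinueOnError)

    // The moral equivalent of flags.FVarP(flagSet, &Opt.DirCacheTime, ...).
    flagSet.VarP(&opt, "dir-cache-time", "", "Time to cache directory entries for")

    if err := flagSet.Parse([]string{"--dir-cache-time", "1h30m"}); err != nil {
        panic(err)
    }
    fmt.Println(opt.String()) // 1h30m0s
}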
@@ -50,13 +50,13 @@ func RunTests(t *testing.T, useVFS bool, minimumRequiredCacheMode vfscommon.Cach
     }
     tests := []struct {
         cacheMode vfscommon.CacheMode
-        writeBack time.Duration
+        writeBack fs.Duration
     }{
         {cacheMode: vfscommon.CacheModeOff},
         {cacheMode: vfscommon.CacheModeMinimal},
         {cacheMode: vfscommon.CacheModeWrites},
         {cacheMode: vfscommon.CacheModeFull},
-        {cacheMode: vfscommon.CacheModeFull, writeBack: 100 * time.Millisecond},
+        {cacheMode: vfscommon.CacheModeFull, writeBack: fs.Duration(100 * time.Millisecond)},
     }
     for _, test := range tests {
         if test.cacheMode < minimumRequiredCacheMode {
@@ -130,7 +130,7 @@ func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
         return 0, ECLOSED
     }
     if fh.offset != off {
-        waitSequential("write", fh.remote, &fh.cond, fh.file.VFS().Opt.WriteWait, &fh.offset, off)
+        waitSequential("write", fh.remote, &fh.cond, time.Duration(fh.file.VFS().Opt.WriteWait), &fh.offset, off)
     }
     if fh.offset != off {
         fs.Errorf(fh.remote, "WriteFileHandle.Write: can't seek in file without --vfs-cache-mode >= writes")