vfs: add --vfs-cache-min-free-space to control minimum free space on the disk containing the cache
See: https://forum.rclone.org/t/rclone-fails-to-control-disk-usage-and-its-filling-the-disk-to-100/41494/
parent 39f910a65d
commit 63ab250817
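
As a quick illustration of how the new option is meant to be used (the remote name and mount point below are placeholders; the other flags already exist in rclone):

    rclone mount remote: /mnt/remote \
        --vfs-cache-mode full \
        --vfs-cache-max-size 10G \
        --vfs-cache-min-free-space 1G

With both quotas set, the cache is trimmed whenever its total size goes over 10G or free space on the disk holding the cache drops below 1G, whichever is hit first; open files are never evicted, so the limits can be exceeded temporarily.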
vfs/help.go:

@@ -84,12 +84,13 @@ write simultaneously to a file. See below for more details.
 Note that the VFS cache is separate from the cache backend and you may
 find that you need one or the other or both.
 
     --cache-dir string                     Directory rclone will use for caching.
     --vfs-cache-mode CacheMode             Cache mode off|minimal|writes|full (default off)
     --vfs-cache-max-age duration           Max time since last access of objects in the cache (default 1h0m0s)
     --vfs-cache-max-size SizeSuffix        Max total size of objects in the cache (default off)
+    --vfs-cache-min-free-space SizeSuffix  Target minimum free space on the disk containing the cache (default off)
     --vfs-cache-poll-interval duration     Interval to poll the cache for stale objects (default 1m0s)
     --vfs-write-back duration              Time to writeback files after last use when using cache (default 5s)
 
 If run with !-vv! rclone will print the location of the file cache. The
 files are stored in the user cache file area which is OS dependent but
@@ -106,14 +107,15 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using !--vfs-cache-max-size! note that the cache may exceed this size
-for two reasons. Firstly because it is only checked every
-!--vfs-cache-poll-interval!. Secondly because open files cannot be
-evicted from the cache. When !--vfs-cache-max-size!
-is exceeded, rclone will attempt to evict the least accessed files
-from the cache first. rclone will start with files that haven't
-been accessed for the longest. This cache flushing strategy is
-efficient and more relevant files are likely to remain cached.
+If using !--vfs-cache-max-size! or !--vfs-cache-min-free-space! note
+that the cache may exceed these quotas for two reasons. Firstly
+because it is only checked every !--vfs-cache-poll-interval!. Secondly
+because open files cannot be evicted from the cache. When
+!--vfs-cache-max-size! or !--vfs-cache-min-free-space! is exceeded,
+rclone will attempt to evict the least accessed files from the cache
+first. rclone will start with files that haven't been accessed for the
+longest. This cache flushing strategy is efficient and more relevant
+files are likely to remain cached.
 
 The !--vfs-cache-max-age! will evict files from the cache
 after the set time since last access has passed. The default value of

vfs/vfscache/cache.go:

@@ -22,6 +22,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/rc"
+	"github.com/rclone/rclone/lib/diskusage"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/rclone/rclone/vfs/vfscache/writeback"
@@ -607,16 +608,17 @@ func (c *Cache) retryFailedResets() {
 	}
 }
 
-func (c *Cache) purgeClean(quota int64) {
+// Remove cache files that are not dirty until the quota is satisfied
+func (c *Cache) purgeClean() {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
-	var items Items
-
-	if quota <= 0 || c.used < quota {
+	if c.quotasOK() {
 		return
 	}
 
+	var items Items
+
 	// Make a slice of clean cache files
 	for _, item := range c.item {
 		if !item.IsDirty() {
@@ -628,7 +630,7 @@ func (c *Cache) purgeClean(quota int64) {
 
 	// Reset items until the quota is OK
 	for _, item := range items {
-		if c.used < quota {
+		if c.quotasOK() {
 			break
 		}
 		resetResult, spaceFreed, err := item.Reset()
@@ -661,7 +663,7 @@ func (c *Cache) purgeOld(maxAge time.Duration) {
 	for _, item := range c.item {
 		c.removeNotInUse(item, maxAge, false)
 	}
-	if c.used < int64(c.opt.CacheMaxSize) {
+	if c.quotasOK() {
 		c.outOfSpace = false
 		c.cond.Broadcast()
 	}
@@ -693,16 +695,53 @@ func (c *Cache) updateUsed() (used int64) {
 	return newUsed
 }
 
+// Check the available space for a disk is in limits.
+func (c *Cache) minFreeSpaceQuotaOK() bool {
+	if c.opt.CacheMinFreeSpace <= 0 {
+		return true
+	}
+	du, err := diskusage.New(config.GetCacheDir())
+	if err == diskusage.ErrUnsupported {
+		return true
+	}
+	if err != nil {
+		fs.Errorf(nil, "disk usage returned error: %v", err)
+		return true
+	}
+	return du.Available >= uint64(c.opt.CacheMinFreeSpace)
+}
+
+// Check the available quota for a disk is in limits.
+//
+// must be called with mu held.
+func (c *Cache) maxSizeQuotaOK() bool {
+	if c.opt.CacheMaxSize <= 0 {
+		return true
+	}
+	return c.used <= int64(c.opt.CacheMaxSize)
+}
+
+// Check the available quotas for a disk is in limits.
+//
+// must be called with mu held.
+func (c *Cache) quotasOK() bool {
+	return c.maxSizeQuotaOK() && c.minFreeSpaceQuotaOK()
+}
+
+// Return true if any quotas set
+func (c *Cache) haveQuotas() bool {
+	return c.opt.CacheMaxSize > 0 || c.opt.CacheMinFreeSpace > 0
+}
+
 // Remove clean cache files that are not open until the total space
 // is reduced below quota starting from the oldest first
-func (c *Cache) purgeOverQuota(quota int64) {
+func (c *Cache) purgeOverQuota() {
 	c.updateUsed()
 
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
-	if quota <= 0 || c.used < quota {
+	if c.quotasOK() {
 		return
 	}
 
@@ -719,9 +758,9 @@ func (c *Cache) purgeOverQuota(quota int64) {
 
 	// Remove items until the quota is OK
 	for _, item := range items {
-		c.removeNotInUse(item, 0, c.used <= quota)
+		c.removeNotInUse(item, 0, c.quotasOK())
 	}
-	if c.used < quota {
+	if c.quotasOK() {
 		c.outOfSpace = false
 		c.cond.Broadcast()
 	}
@@ -743,12 +782,12 @@ func (c *Cache) clean(kicked bool) {
 	c.purgeOld(c.opt.CacheMaxAge)
 
 	// If have a maximum cache size...
-	if int64(c.opt.CacheMaxSize) > 0 {
+	if c.haveQuotas() {
 		// Remove files not in use until cache size is below quota starting from the oldest first
-		c.purgeOverQuota(int64(c.opt.CacheMaxSize))
+		c.purgeOverQuota()
 
 		// Remove cache files that are not dirty if we are still above the max cache size
-		c.purgeClean(int64(c.opt.CacheMaxSize))
+		c.purgeClean()
 		c.retryFailedResets()
 	}
 

vfs/vfscache/cache_test.go:

@@ -10,7 +10,10 @@ import (
 	"time"
 
 	_ "github.com/rclone/rclone/backend/local" // import the local backend
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/lib/diskusage"
 	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -354,7 +357,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
 	}, itemAsString(c))
 
 	// Check nothing removed
-	c.purgeOverQuota(1)
+	c.opt.CacheMaxSize = 1
+	c.purgeOverQuota()
 
 	// Close the files
 	require.NoError(t, potato.Close(nil))
@@ -373,7 +377,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
 	potato2.info.ATime = t1
 
 	// Check only potato removed to get below quota
-	c.purgeOverQuota(10)
+	c.opt.CacheMaxSize = 10
+	c.purgeOverQuota()
 	assert.Equal(t, int64(6), c.used)
 
 	assert.Equal(t, []string{
@@ -399,7 +404,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
 	potato.info.ATime = t2
 
 	// Check only potato2 removed to get below quota
-	c.purgeOverQuota(10)
+	c.opt.CacheMaxSize = 10
+	c.purgeOverQuota()
 	assert.Equal(t, int64(5), c.used)
 	c.purgeEmptyDirs("", true)
 
@@ -408,7 +414,8 @@ func TestCachePurgeOverQuota(t *testing.T) {
 	}, itemAsString(c))
 
 	// Now purge everything
-	c.purgeOverQuota(1)
+	c.opt.CacheMaxSize = 1
+	c.purgeOverQuota()
 	assert.Equal(t, int64(0), c.used)
 	c.purgeEmptyDirs("", true)
 
@@ -420,6 +427,26 @@ func TestCachePurgeOverQuota(t *testing.T) {
 	assert.Equal(t, []string(nil), itemAsString(c))
 }
 
+func TestCachePurgeMinFreeSpace(t *testing.T) {
+	du, err := diskusage.New(config.GetCacheDir())
+	if err == diskusage.ErrUnsupported {
+		t.Skip(err)
+	}
+	// We've tested the quota mechanism already, so just test the
+	// min free space quota is working.
+	_, c := newTestCache(t)
+
+	// First set free space quota very small and check it is OK
+	c.opt.CacheMinFreeSpace = 1
+	assert.True(t, c.minFreeSpaceQuotaOK())
+	assert.True(t, c.quotasOK())
+
+	// Now set it a bit larger than the current disk available and check it is BAD
+	c.opt.CacheMinFreeSpace = fs.SizeSuffix(du.Available) + fs.Gibi
+	assert.False(t, c.minFreeSpaceQuotaOK())
+	assert.False(t, c.quotasOK())
+}
+
 // test reset clean files
 func TestCachePurgeClean(t *testing.T) {
 	r, c := newItemTestCache(t)
@@ -453,7 +480,8 @@ func TestCachePurgeClean(t *testing.T) {
 	require.NoError(t, potato3.Truncate(6))
 
 	c.updateUsed()
-	c.purgeClean(1)
+	c.opt.CacheMaxSize = 1
+	c.purgeClean()
 	assert.Equal(t, []string{
 		`name="existing" opens=2 size=100 space=0`,
 		`name="sub/dir/potato2" opens=1 size=5 space=5`,
@@ -462,7 +490,8 @@ func TestCachePurgeClean(t *testing.T) {
 	assert.Equal(t, int64(11), c.used)
 
 	require.NoError(t, potato2.Close(nil))
-	c.purgeClean(1)
+	c.opt.CacheMaxSize = 1
+	c.purgeClean()
 	assert.Equal(t, []string{
 		`name="existing" opens=2 size=100 space=0`,
 		`name="sub/dir/potato3" opens=1 size=6 space=6`,
@@ -476,7 +505,8 @@ func TestCachePurgeClean(t *testing.T) {
 	// Remove all files now. The are all not in use.
 	// purgeClean does not remove empty cache files. purgeOverQuota does.
 	// So we use purgeOverQuota here for the cleanup.
-	c.purgeOverQuota(1)
+	c.opt.CacheMaxSize = 1
+	c.purgeOverQuota()
 
 	c.purgeEmptyDirs("", true)
 

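The new test can be run on its own with standard Go tooling from the root of the rclone source tree, for example:

    go test ./vfs/vfscache -run TestCachePurgeMinFreeSpace -v

On filesystems where lib/diskusage reports ErrUnsupported the test skips itself, matching minFreeSpaceQuotaOK, which treats the quota as satisfied in that case.
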
vfs/vfscommon/options.go:

@@ -26,6 +26,7 @@ type Options struct {
 	CacheMode         CacheMode
 	CacheMaxAge       time.Duration
 	CacheMaxSize      fs.SizeSuffix
+	CacheMinFreeSpace fs.SizeSuffix
 	CachePollInterval time.Duration
 	CaseInsensitive   bool
 	WriteWait         time.Duration // time to wait for in-sequence write
@@ -56,6 +57,7 @@ var DefaultOpt = Options{
 	ChunkSize:         128 * fs.Mebi,
 	ChunkSizeLimit:    -1,
 	CacheMaxSize:      -1,
+	CacheMinFreeSpace: -1,
 	CaseInsensitive:   runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
 	WriteWait:         1000 * time.Millisecond,
 	ReadWait:          20 * time.Millisecond,

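For programs that embed the VFS as a library rather than driving it through command line flags, the new field can be set directly on the options struct. A minimal sketch, assuming the usual vfs.New(f, &opt) constructor and an already created fs.Fs; the package and helper names are hypothetical:

    package vfsexample

    import (
        "github.com/rclone/rclone/fs"
        "github.com/rclone/rclone/vfs"
        "github.com/rclone/rclone/vfs/vfscommon"
    )

    // newQuotaVFS returns a VFS whose cache is trimmed when it grows past
    // 10 GiB or when free space on the cache disk falls below 1 GiB.
    func newQuotaVFS(f fs.Fs) *vfs.VFS {
        opt := vfscommon.DefaultOpt             // start from rclone's defaults
        opt.CacheMode = vfscommon.CacheModeFull // cache file data on local disk
        opt.CacheMaxSize = 10 * fs.Gibi         // existing total-size quota
        opt.CacheMinFreeSpace = 1 * fs.Gibi     // new free-space quota added by this commit
        return vfs.New(f, &opt)
    }

Either quota being breached makes quotasOK() return false, after which the cleaner removes files that are not in use, oldest first, and then resets clean (non-dirty) cache files if it is still over quota.
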
vfs/vfsflags/vfsflags.go:

@@ -28,6 +28,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects", "VFS")
 	flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max time since last access of objects in the cache", "VFS")
 	flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache", "VFS")
+	flags.FVarP(flagSet, &Opt.CacheMinFreeSpace, "vfs-cache-min-free-space", "", "Target minimum free space on the disk containing the cache", "VFS")
 	flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks", "VFS")
 	flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited)", "VFS")
 	flags.FVarP(flagSet, DirPerms, "dir-perms", "", "Directory permissions", "VFS")