Mirror of https://github.com/rclone/rclone.git
vfs cache: make logging consistent and remove some debug logging

commit b2f4f52b64
parent c65ed26a7e
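The theme of the diff below is uniformity: every log message the VFS cache layer emits gains a `vfs cache:` prefix with lower-case wording, several messages change level (`fs.Infof` ↔ `fs.Debugf`), and per-call `log.Trace` instrumentation is commented out. As a runnable before/after illustration of the naming convention only — `logf` is a stdlib stand-in for rclone's `fs.Infof`, so nothing below is code from the commit:

```go
package main

import "log"

// logf mimics the shape of rclone's fs.Infof calls (an object the
// message is about, then a printf format); it is a stand-in so this
// illustration runs without rclone.
func logf(o interface{}, format string, args ...interface{}) {
	log.Printf("%v: "+format, append([]interface{}{o}, args...)...)
}

func main() {
	name, newName := "file.txt", "renamed.txt"

	// Before this commit, prefix and casing varied between messages:
	logf(name, "Renamed in cache to %q", newName)

	// After it, every cache-layer message reads the same way, so one
	// `grep "vfs cache:"` over the log isolates the whole subsystem:
	logf(name, "vfs cache: renamed in cache to %q", newName)
}
```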
@@ -73,9 +73,9 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVir
 		fRoot = strings.Replace(fRoot, ":", "", -1)
 	}
 	root := file.UNCPath(filepath.Join(config.CacheDir, "vfs", fremote.Name(), fRoot))
-	fs.Debugf(nil, "vfs cache root is %q", root)
+	fs.Debugf(nil, "vfs cache: root is %q", root)
 	metaRoot := file.UNCPath(filepath.Join(config.CacheDir, "vfsMeta", fremote.Name(), fRoot))
-	fs.Debugf(nil, "vfs metadata cache root is %q", root)
+	fs.Debugf(nil, "vfs cache: metadata root is %q", root)

 	fcache, err := fscache.Get(root)
 	if err != nil {
@@ -315,7 +315,7 @@ func (c *Cache) Rename(name string, newName string, newObj fs.Object) (err error
 	}
 	c.mu.Unlock()

-	fs.Infof(name, "Renamed in cache to %q", newName)
+	fs.Infof(name, "vfs cache: renamed in cache to %q", newName)
 	return nil
 }

@@ -431,11 +431,11 @@ func (c *Cache) purgeEmptyDirs() {
 	ctx := context.Background()
 	err := operations.Rmdirs(ctx, c.fcache, "", true)
 	if err != nil {
-		fs.Errorf(c.fcache, "Failed to remove empty directories from cache: %v", err)
+		fs.Errorf(c.fcache, "vfs cache: failed to remove empty directories from cache: %v", err)
 	}
 	err = operations.Rmdirs(ctx, c.fcacheMeta, "", true)
 	if err != nil {
-		fs.Errorf(c.fcache, "Failed to remove empty directories from metadata cache: %v", err)
+		fs.Errorf(c.fcache, "vfs cache: failed to remove empty directories from metadata cache: %v", err)
 	}
 }

@@ -523,7 +523,7 @@ func (c *Cache) clean() {
 	c.mu.Unlock()
 	uploadsInProgress, uploadsQueued := c.writeback.Stats()

-	fs.Infof(nil, "Cleaned the cache: objects %d (was %d) in use %d, to upload %d, uploading %d, total size %v (was %v)", newItems, oldItems, totalInUse, uploadsQueued, uploadsInProgress, newUsed, oldUsed)
+	fs.Infof(nil, "vfs cache: cleaned: objects %d (was %d) in use %d, to upload %d, uploading %d, total size %v (was %v)", newItems, oldItems, totalInUse, uploadsQueued, uploadsInProgress, newUsed, oldUsed)
 }

 // cleaner calls clean at regular intervals
@@ -531,7 +531,7 @@ func (c *Cache) clean() {
 // doesn't return until context is cancelled
 func (c *Cache) cleaner(ctx context.Context) {
 	if c.opt.CachePollInterval <= 0 {
-		fs.Debugf(nil, "Cache cleaning thread disabled because poll interval <= 0")
+		fs.Debugf(nil, "vfs cache: cleaning thread disabled because poll interval <= 0")
 		return
 	}
 	// Start cleaning the cache immediately
@@ -544,7 +544,7 @@ func (c *Cache) cleaner(ctx context.Context) {
 		case <-timer.C:
 			c.clean()
 		case <-ctx.Done():
-			fs.Debugf(nil, "cache cleaner exiting")
+			fs.Debugf(nil, "vfs cache: cleaner exiting")
 			return
 		}
 	}
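Two of the hunks above touch `Cache.cleaner`, which runs `clean` on every poll tick until its context is cancelled. A minimal sketch of that loop shape, with illustrative names and stdlib logging rather than rclone's code:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// cleaner mirrors the loop in the hunks above: bail out when the poll
// interval is disabled, clean once up front, then clean on each tick
// until the context is cancelled. Names here are illustrative.
func cleaner(ctx context.Context, pollInterval time.Duration, clean func()) {
	if pollInterval <= 0 {
		fmt.Println("vfs cache: cleaning thread disabled because poll interval <= 0")
		return
	}
	clean() // "Start cleaning the cache immediately", as the context line says
	timer := time.NewTicker(pollInterval)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			clean()
		case <-ctx.Done():
			fmt.Println("vfs cache: cleaner exiting")
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	cleaner(ctx, 100*time.Millisecond, func() { fmt.Println("vfs cache: cleaned") })
}
```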
@@ -10,7 +10,6 @@ import (
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/asyncreader"
 	"github.com/rclone/rclone/fs/chunkedreader"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/lib/ranges"
 	"github.com/rclone/rclone/vfs/vfscommon"
 )
@@ -118,7 +117,7 @@ func New(item Item, opt *vfscommon.Options, remote string, src fs.Object) (dls *
 		case <-ticker.C:
 			err := dls.kickWaiters()
 			if err != nil {
-				fs.Errorf(dls.src, "Failed to kick waiters: %v", err)
+				fs.Errorf(dls.src, "vfs cache: failed to kick waiters: %v", err)
 			}
 		case <-ctx.Done():
 			break
@@ -140,7 +139,7 @@ func New(item Item, opt *vfscommon.Options, remote string, src fs.Object) (dls *
 func (dls *Downloaders) _countErrors(n int64, err error) {
 	if err == nil && n != 0 {
 		if dls.errorCount != 0 {
-			fs.Infof(dls.src, "Resetting error count to 0")
+			fs.Infof(dls.src, "vfs cache: downloader: resetting error count to 0")
 			dls.errorCount = 0
 			dls.lastErr = nil
 		}
@@ -149,7 +148,7 @@ func (dls *Downloaders) _countErrors(n int64, err error) {
 	if err != nil {
 		dls.errorCount++
 		dls.lastErr = err
-		fs.Infof(dls.src, "Error count now %d: %v", dls.errorCount, err)
+		fs.Infof(dls.src, "vfs cache: downloader: error count now %d: %v", dls.errorCount, err)
 	}
 }

@@ -163,7 +162,7 @@ func (dls *Downloaders) countErrors(n int64, err error) {
 //
 // call with lock held
 func (dls *Downloaders) _newDownloader(r ranges.Range) (dl *downloader, err error) {
-	defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)
+	// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)

 	dl = &downloader{
 		kick: make(chan struct{}, 1),
@@ -189,11 +188,11 @@ func (dls *Downloaders) _newDownloader(r ranges.Range) (dl *downloader, err erro
 		_ = dl.close(err)
 		dl.dls.countErrors(n, err)
 		if err != nil {
-			fs.Errorf(dl.dls.src, "Failed to download: %v", err)
+			fs.Errorf(dl.dls.src, "vfs cache: failed to download: %v", err)
 		}
 		err = dl.dls.kickWaiters()
 		if err != nil {
-			fs.Errorf(dl.dls.src, "Failed to kick waiters: %v", err)
+			fs.Errorf(dl.dls.src, "vfs cache: failed to kick waiters: %v", err)
 		}
 	}()

@@ -238,7 +237,7 @@ func (dls *Downloaders) Close(inErr error) (err error) {
 // Download the range passed in returning when it has been downloaded
 // with an error from the downloading go routine.
 func (dls *Downloaders) Download(r ranges.Range) (err error) {
-	defer log.Trace(dls.src, "r=%+v", r)("err=%v", &err)
+	// defer log.Trace(dls.src, "r=%+v", r)("err=%v", &err)

 	dls.mu.Lock()

@@ -384,12 +383,12 @@ func (dls *Downloaders) kickWaiters() (err error) {
 		err = dls._ensureDownloader(waiter.r)
 		if err != nil {
 			// Failures here will be retried by background kicker
-			fs.Errorf(dls.src, "Restart download failed: %v", err)
+			fs.Errorf(dls.src, "vfs cache: restart download failed: %v", err)
 		}
 	}

 	if dls.errorCount > maxErrorCount {
-		fs.Errorf(dls.src, "Too many errors %d/%d: last error: %v", dls.errorCount, maxErrorCount, dls.lastErr)
+		fs.Errorf(dls.src, "vfs cache: too many errors %d/%d: last error: %v", dls.errorCount, maxErrorCount, dls.lastErr)
 		dls._closeWaiters(dls.lastErr)
 		return dls.lastErr
 	}
@@ -405,7 +404,7 @@ func (dls *Downloaders) kickWaiters() (err error) {
 //
 // Implementations must not retain p.
 func (dl *downloader) Write(p []byte) (n int, err error) {
-	defer log.Trace(dl.dls.src, "p_len=%d", len(p))("n=%d, err=%v", &n, &err)
+	// defer log.Trace(dl.dls.src, "p_len=%d", len(p))("n=%d, err=%v", &n, &err)

 	// Kick the waiters on exit if some characters received
 	defer func() {
@@ -441,7 +440,7 @@ func (dl *downloader) Write(p []byte) (n int, err error) {
 			// stop any future reading
 			dl.mu.Lock()
 			if !dl.stop {
-				fs.Debugf(dl.dls.src, "stopping download thread as it timed out")
+				fs.Debugf(dl.dls.src, "vfs cache: stopping download thread as it timed out")
 				dl._stop()
 			}
 		}
@@ -457,7 +456,7 @@ func (dl *downloader) Write(p []byte) (n int, err error) {

 	// Kill this downloader if skipped too many bytes
 	if !dl.stop && dl.skipped > maxSkipBytes {
-		fs.Debugf(dl.dls.src, "stopping download thread as it has skipped %d bytes", dl.skipped)
+		fs.Debugf(dl.dls.src, "vfs cache: stopping download thread as it has skipped %d bytes", dl.skipped)
 		dl._stop()
 	}

@@ -475,7 +474,7 @@ func (dl *downloader) Write(p []byte) (n int, err error) {
 //
 // should be called on a fresh downloader
 func (dl *downloader) open(offset int64) (err error) {
-	defer log.Trace(dl.dls.src, "offset=%d", offset)("err=%v", &err)
+	// defer log.Trace(dl.dls.src, "offset=%d", offset)("err=%v", &err)
 	dl.tr = accounting.Stats(dl.dls.ctx).NewTransfer(dl.dls.src)

 	size := dl.dls.src.Size()
@@ -508,7 +507,7 @@ func (dl *downloader) open(offset int64) (err error) {

 // close the downloader
 func (dl *downloader) close(inErr error) (err error) {
-	defer log.Trace(dl.dls.src, "inErr=%v", err)("err=%v", &err)
+	// defer log.Trace(dl.dls.src, "inErr=%v", err)("err=%v", &err)
 	checkErr := func(e error) {
 		if e == nil || errors.Cause(err) == asyncreader.ErrorStreamAbandoned {
 			return
@@ -540,7 +539,7 @@ func (dl *downloader) closed() bool {
 //
 // Call with the mutex held
 func (dl *downloader) _stop() {
-	defer log.Trace(dl.dls.src, "")("")
+	// defer log.Trace(dl.dls.src, "")("")

 	// exit if have already called _stop
 	if dl.stop {
@@ -575,7 +574,7 @@ func (dl *downloader) stopAndClose(inErr error) (err error) {

 // Start downloading to the local file starting at offset until maxOffset.
 func (dl *downloader) download() (n int64, err error) {
-	defer log.Trace(dl.dls.src, "")("err=%v", &err)
+	// defer log.Trace(dl.dls.src, "")("err=%v", &err)
 	n, err = dl.in.WriteTo(dl)
 	if err != nil && errors.Cause(err) != asyncreader.ErrorStreamAbandoned {
 		return n, errors.Wrap(err, "vfs reader: failed to write to cache file")
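Most of the edits in this file comment out `defer log.Trace(...)(...)` calls rather than delete them (hence the removal of the now-unused `fs/log` import), which keeps per-function tracing cheap to re-enable. The pattern works because the helper logs on entry and returns a closure that `defer` runs on exit; passing pointers to named returns lets the exit line show their final values. A simplified stand-in inferred from the call shape above — the real `fs/log.Trace` differs in detail:

```go
package main

import "log"

// trace logs entry and returns a closure that logs exit, so a single
// statement instruments both ends of a function:
//
//	defer trace(o, "in")("err=%v", &err)
//
// Simplified stand-in; not rclone's actual fs/log.Trace implementation.
func trace(o interface{}, format string, args ...interface{}) func(string, ...interface{}) {
	log.Printf("%v: > "+format, append([]interface{}{o}, args...)...)
	return func(format string, args ...interface{}) {
		// Dereference *error args so the deferred call logs the final
		// value of a named return rather than a pointer address.
		for i, a := range args {
			if p, ok := a.(*error); ok {
				args[i] = *p
			}
		}
		log.Printf("%v: < "+format, append([]interface{}{o}, args...)...)
	}
}

func fetch(src string) (err error) {
	defer trace(src, "offset=%d", 0)("err=%v", &err)
	return nil // the real work would happen here
}

func main() {
	_ = fetch("remote:file.txt")
}
```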
@@ -11,7 +11,6 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/rclone/rclone/lib/ranges"
@@ -223,14 +222,14 @@ func (item *Item) _truncate(size int64) (err error) {
 		osPath := item.c.toOSPath(item.name) // No locking in Cache
 		fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
 		if err != nil {
-			return errors.Wrap(err, "vfs item truncate: failed to open cache file")
+			return errors.Wrap(err, "vfs cache: truncate: failed to open cache file")
 		}

 		defer fs.CheckClose(fd, &err)

 		err = file.SetSparse(fd)
 		if err != nil {
-			fs.Debugf(item.name, "vfs item truncate: failed to set as a sparse file: %v", err)
+			fs.Debugf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
 		}
 	}

@@ -238,7 +237,7 @@ func (item *Item) _truncate(size int64) (err error) {

 	err = fd.Truncate(size)
 	if err != nil {
-		return errors.Wrap(err, "vfs truncate: failed to truncate")
+		return errors.Wrap(err, "vfs cache: truncate")
 	}

 	item.info.Size = size
@@ -403,7 +402,7 @@ func (item *Item) IsDirty() bool {
 // Open the local file from the object passed in (which may be nil)
 // which implies we are about to create the file
 func (item *Item) Open(o fs.Object) (err error) {
-	defer log.Trace(o, "item=%p", item)("err=%v", &err)
+	// defer log.Trace(o, "item=%p", item)("err=%v", &err)
 	item.mu.Lock()
 	defer item.mu.Unlock()

@@ -434,7 +433,7 @@ func (item *Item) Open(o fs.Object) (err error) {
 	}
 	err = file.SetSparse(fd)
 	if err != nil {
-		fs.Debugf(item.name, "vfs cache item: failed to set as a sparse file: %v", err)
+		fs.Debugf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
 	}
 	item.fd = fd

@@ -479,7 +478,7 @@ func (item *Item) Open(o fs.Object) (err error) {
 //
 // Call with lock held
 func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
-	defer log.Trace(item.name, "item=%p", item)("err=%v", &err)
+	// defer log.Trace(item.name, "item=%p", item)("err=%v", &err)

 	// Transfer the temp file to the remote
 	cacheObj, err := item.c.fcache.NewObject(ctx, item.name)
@@ -503,10 +502,10 @@ func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
 	item.info.Dirty = false
 	err = item._save()
 	if err != nil {
-		fs.Errorf(item.name, "Failed to write metadata file: %v", err)
+		fs.Errorf(item.name, "vfs cache: failed to write metadata file: %v", err)
 	}
 	if storeFn != nil && item.o != nil {
-		fs.Debugf(item.name, "writeback object to VFS layer")
+		fs.Debugf(item.name, "vfs cache: writeback object to VFS layer")
 		// Write the object back to the VFS layer as last
 		// thing we do with mutex unlocked
 		item.mu.Unlock()
@@ -526,7 +525,7 @@ func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) {

 // Close the cache file
 func (item *Item) Close(storeFn StoreFn) (err error) {
-	defer log.Trace(item.o, "Item.Close")("err=%v", &err)
+	// defer log.Trace(item.o, "Item.Close")("err=%v", &err)
 	var (
 		downloaders *downloaders.Downloaders
 		syncWriteBack = item.c.opt.WriteBack <= 0
@@ -562,7 +561,7 @@ func (item *Item) Close(storeFn StoreFn) (err error) {
 	// Accumulate and log errors
 	checkErr := func(e error) {
 		if e != nil {
-			fs.Errorf(item.o, "vfs cache item close failed: %v", e)
+			fs.Errorf(item.o, "vfs cache: item close failed: %v", e)
 			if err == nil {
 				err = e
 			}
@@ -608,7 +607,7 @@ func (item *Item) Close(storeFn StoreFn) (err error) {

 	// upload the file to backing store if changed
 	if item.info.Dirty {
-		fs.Debugf(item.name, "item changed - writeback in %v", item.c.opt.WriteBack)
+		fs.Infof(item.name, "vfs cache: queuing for upload in %v", item.c.opt.WriteBack)
 		if syncWriteBack {
 			// do synchronous writeback
 			checkErr(item._store(context.Background(), storeFn))
@@ -806,13 +805,13 @@ func (item *Item) FindMissing(r ranges.Range) (outr ranges.Range) {
 //
 // call with the item lock held
 func (item *Item) _ensure(offset, size int64) (err error) {
-	defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("err=%v", &err)
+	// defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("err=%v", &err)
 	if offset+size > item.info.Size {
 		size = item.info.Size - offset
 	}
 	r := ranges.Range{Pos: offset, Size: size}
 	present := item.info.Rs.Present(r)
-	fs.Debugf(nil, "looking for range=%+v in %+v - present %v", r, item.info.Rs, present)
+	fs.Debugf(nil, "vfs cache: looking for range=%+v in %+v - present %v", r, item.info.Rs, present)
 	item.mu.Unlock()
 	defer item.mu.Lock()
 	if present {
@@ -840,7 +839,7 @@ func (item *Item) _ensure(offset, size int64) (err error) {
 //
 // call with lock held
 func (item *Item) _written(offset, size int64) {
-	defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("")
+	// defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("")
 	item.info.Rs.Insert(ranges.Range{Pos: offset, Size: size})
 	item.metaDirty = true
 }
@@ -855,7 +854,7 @@ func (item *Item) _updateFingerprint() {
 	oldFingerprint := item.info.Fingerprint
 	item.info.Fingerprint = fs.Fingerprint(context.TODO(), item.o, false)
 	if oldFingerprint != item.info.Fingerprint {
-		fs.Debugf(item.o, "fingerprint now %q", item.info.Fingerprint)
+		fs.Debugf(item.o, "vfs cache: fingerprint now %q", item.info.Fingerprint)
 		item.metaDirty = true
 	}
 }
@@ -868,13 +867,13 @@ func (item *Item) _setModTime(modTime time.Time) {
 	osPath := item.c.toOSPath(item.name) // No locking in Cache
 	err := os.Chtimes(osPath, modTime, modTime)
 	if err != nil {
-		fs.Errorf(item.name, "Failed to set modification time of cached file: %v", err)
+		fs.Errorf(item.name, "vfs cache: failed to set modification time of cached file: %v", err)
 	}
 }

 // setModTime of the cache file and in the Item
 func (item *Item) setModTime(modTime time.Time) {
-	defer log.Trace(item.name, "modTime=%v", modTime)("")
+	// defer log.Trace(item.name, "modTime=%v", modTime)("")
 	item.mu.Lock()
 	item._updateFingerprint()
 	item._setModTime(modTime)
@@ -961,10 +960,10 @@ func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, e
 	)

 	// Write the range out ignoring already written chunks
-	fs.Debugf(item.name, "Ranges = %v", item.info.Rs)
+	// fs.Debugf(item.name, "Ranges = %v", item.info.Rs)
 	for i := range foundRanges {
 		foundRange := &foundRanges[i]
-		fs.Debugf(item.name, "foundRange[%d] = %v", i, foundRange)
+		// fs.Debugf(item.name, "foundRange[%d] = %v", i, foundRange)
 		if foundRange.R.Pos != off {
 			err = errors.New("internal error: offset of range is wrong")
 			break
@@ -972,12 +971,12 @@ func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, e
 		size := int(foundRange.R.Size)
 		if foundRange.Present {
 			// if present want to skip this range
-			fs.Debugf(item.name, "skip chunk offset=%d size=%d", off, size)
+			// fs.Debugf(item.name, "skip chunk offset=%d size=%d", off, size)
 			nn = size
 			skipped += size
 		} else {
 			// if range not present then we want to write it
-			fs.Debugf(item.name, "write chunk offset=%d size=%d", off, size)
+			// fs.Debugf(item.name, "write chunk offset=%d size=%d", off, size)
 			nn, err = item.fd.WriteAt(b[:size], off)
 			if err == nil && nn != size {
 				err = errors.Errorf("downloader: short write: tried to write %d but only %d written", size, nn)
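For context on the `_truncate` hunks above: the cache file is opened, marked sparse, and truncated to the item's size, so it can be addressed at full length while unfetched ranges occupy no disk. A stdlib-only sketch of that shape — `ensureCacheFileSize` is an illustrative name, `%w` wrapping stands in for `github.com/pkg/errors`, and rclone's `file.OpenFile`/`file.SetSparse` helpers for Windows paths and sparseness are omitted:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// ensureCacheFileSize creates the cache file if needed and extends it
// to the wanted size without writing any data. On typical filesystems
// Truncate just records the new length, leaving a hole, so reserved
// ranges cost no disk until they are actually downloaded.
func ensureCacheFileSize(osPath string, size int64) (err error) {
	fd, err := os.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return fmt.Errorf("vfs cache: truncate: failed to open cache file: %w", err)
	}
	// like rclone's fs.CheckClose above: don't lose the error from Close
	defer func() {
		if cerr := fd.Close(); err == nil {
			err = cerr
		}
	}()
	if err := fd.Truncate(size); err != nil {
		return fmt.Errorf("vfs cache: truncate: %w", err)
	}
	return nil
}

func main() {
	if err := ensureCacheFileSize("/tmp/example-cache-file", 1<<20); err != nil {
		log.Fatal(err)
	}
}
```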
@@ -207,7 +207,7 @@ func (wb *WriteBack) _stopTimer() {
 		return
 	}
 	wb.expiry = time.Time{}
-	fs.Debugf(nil, "resetTimer STOP")
+	// fs.Debugf(nil, "resetTimer STOP")
 	if wb.timer != nil {
 		wb.timer.Stop()
 		wb.timer = nil
@@ -228,7 +228,7 @@ func (wb *WriteBack) _resetTimer() {
 	if dt < 0 {
 		dt = 0
 	}
-	fs.Debugf(nil, "resetTimer dt=%v", dt)
+	// fs.Debugf(nil, "resetTimer dt=%v", dt)
 	if wb.timer != nil {
 		wb.timer.Stop()
 	}
@@ -330,13 +330,14 @@ func (wb *WriteBack) upload(ctx context.Context, wbItem *writeBackItem) {
 	putFn := wbItem.putFn
 	wbItem.tries++

+	fs.Debugf(wbItem.name, "vfs cache: starting upload")
+
 	wb.mu.Unlock()
 	err := putFn(ctx)
 	wb.mu.Lock()

 	wbItem.cancel() // cancel context to release resources since store done

 	//fs.Debugf(wbItem.name, "uploading = false %p item %p", wbItem, wbItem.item)
 	wbItem.uploading = false
 	wb.uploads--

@@ -347,7 +348,7 @@ func (wb *WriteBack) upload(ctx context.Context, wbItem *writeBackItem) {
 		wbItem.delay = maxUploadDelay
 	}
 	if _, uerr := fserrors.Cause(err); uerr == context.Canceled {
-		fs.Infof(wbItem.name, "vfs cache: upload canceled sucessfully")
+		fs.Infof(wbItem.name, "vfs cache: upload canceled")
 		// Upload was cancelled so reset timer
 		wbItem.delay = wb.opt.WriteBack
 	} else {
@@ -372,7 +373,7 @@ func (wb *WriteBack) _cancelUpload(wbItem *writeBackItem) {
 	if !wbItem.uploading {
 		return
 	}
-	fs.Infof(wbItem.name, "vfs cache: cancelling upload")
+	fs.Debugf(wbItem.name, "vfs cache: cancelling upload")
 	if wbItem.cancel != nil {
 		// Cancel the upload - this may or may not be effective
 		wbItem.cancel()
@@ -386,7 +387,7 @@ func (wb *WriteBack) _cancelUpload(wbItem *writeBackItem) {
 	}
 	// uploading items are not on the heap so add them back
 	wb._pushItem(wbItem)
-	fs.Infof(wbItem.name, "vfs cache: cancelled upload")
+	fs.Debugf(wbItem.name, "vfs cache: cancelled upload")
 }

 // cancelUpload cancels the upload of the item if there is one in progress
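The writeback hunks above adjust logging around a cancellable upload: each queued item carries a `context.CancelFunc`, the store function runs under that context, and a `context.Canceled` result is requeued rather than counted as a failure. A minimal sketch of that shape, with illustrative names and stdlib `errors.Is` in place of rclone's `fserrors.Cause` check:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// writeBackItem holds just enough state for this sketch; the real
// struct in the diff also has a heap index, retry delay, timer, etc.
type writeBackItem struct {
	name   string
	cancel context.CancelFunc
}

// upload mirrors the shape of the hunks above: run the store function
// with the item's context, release the context once the store is done,
// and treat cancellation as a requeue, not a failure.
func upload(ctx context.Context, wbItem *writeBackItem, putFn func(context.Context) error) {
	fmt.Printf("%s: vfs cache: starting upload\n", wbItem.name)
	err := putFn(ctx)
	wbItem.cancel() // cancel context to release resources since store done
	if errors.Is(err, context.Canceled) {
		fmt.Printf("%s: vfs cache: upload canceled\n", wbItem.name)
		return // the real code resets the writeback timer and requeues here
	}
	if err != nil {
		fmt.Printf("%s: vfs cache: upload failed: %v\n", wbItem.name, err)
		return
	}
	fmt.Printf("%s: vfs cache: upload succeeded\n", wbItem.name)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	wbItem := &writeBackItem{name: "file.txt", cancel: cancel}
	go func() { time.Sleep(10 * time.Millisecond); cancel() }() // cancel mid-upload
	upload(ctx, wbItem, func(ctx context.Context) error {
		select {
		case <-time.After(time.Second): // a pretend transfer
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
}
```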