// Package downloaders provides utilities for the VFS layer
package downloaders

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/asyncreader"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/ranges"
	"github.com/rclone/rclone/vfs/vfscommon"
)

// FIXME implement max downloaders

const (
	// max time a downloader can be idle before closing itself
	maxDownloaderIdleTime = 5 * time.Second
	// max number of bytes a reader should skip over before closing it
	maxSkipBytes = 1024 * 1024
	// time between background kicks of waiters to pick up errors
	backgroundKickerInterval = 5 * time.Second
	// maximum number of errors before declaring dead
	maxErrorCount = 10
	// If a downloader is within this range or --buffer-size,
	// whichever is the larger, we will reuse the downloader
	minWindow = 1024 * 1024
)

// Item is the interface that an item to download must obey
type Item interface {
	// FindMissing adjusts r returning a new ranges.Range which only
	// contains the range which needs to be downloaded. This could be
	// empty - check with IsEmpty. It also adjusts this to make sure it is
	// not larger than the file.
	FindMissing(r ranges.Range) (outr ranges.Range)

	// HasRange returns true if the current ranges entirely include range
	HasRange(r ranges.Range) bool

	// WriteAtNoOverwrite writes b to the file, but will not overwrite
	// already present ranges.
	//
	// This is used by the downloader to write bytes to the file.
	//
	// It returns n, the total bytes processed, and skipped, the number of
	// bytes which were processed but not actually written to the file.
	WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, err error)
}
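
// exampleItem is a minimal sketch of an Item implementation, given for
// illustration only - it is not used by the VFS cache. It keeps the data
// in a fixed-size in-memory buffer and assumes that lib/ranges provides
// the Insert, Present and FindMissing helpers on ranges.Ranges. The real
// implementation lives in the vfscache package and writes to a backing
// file on disk.
type exampleItem struct {
	mu      sync.Mutex
	data    []byte
	present ranges.Ranges
}

// FindMissing clips r to the file size and returns the part of it which
// still needs downloading.
func (it *exampleItem) FindMissing(r ranges.Range) (outr ranges.Range) {
	it.mu.Lock()
	defer it.mu.Unlock()
	r.Clip(int64(len(it.data)))
	return it.present.FindMissing(r)
}

// HasRange reports whether r is entirely present already.
func (it *exampleItem) HasRange(r ranges.Range) bool {
	it.mu.Lock()
	defer it.mu.Unlock()
	return it.present.Present(r)
}

// WriteAtNoOverwrite writes b at off and records the new range. A real
// implementation would skip over sub-ranges which are already present and
// report them in skipped - this sketch writes everything and skips nothing.
func (it *exampleItem) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, err error) {
	it.mu.Lock()
	defer it.mu.Unlock()
	n = copy(it.data[off:], b)
	it.present.Insert(ranges.Range{Pos: off, Size: int64(n)})
	return n, 0, nil
}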

// Downloaders is a number of downloaders and a queue of waiters
// waiting for segments to be downloaded to a file.
type Downloaders struct {
	// Write once - no locking required
	ctx    context.Context
	cancel context.CancelFunc
	item   Item
	opt    *vfscommon.Options
	src    fs.Object // source object
	remote string
	wg     sync.WaitGroup

	// Read write
	mu         sync.Mutex
	dls        []*downloader
	waiters    []waiter
	errorCount int   // number of consecutive errors
	lastErr    error // last error received
}

// waiter is a range we are waiting for and a channel to signal when
// the range is found
type waiter struct {
	r       ranges.Range
	errChan chan<- error
}

// downloader represents a running download for part of a file.
type downloader struct {
	// Write once
	dls  *Downloaders   // parent structure
	quit chan struct{}  // close to quit the downloader
	wg   sync.WaitGroup // to keep track of downloader goroutine
	kick chan struct{}  // kick the downloader when needed

	// Read write
	mu        sync.Mutex
	start     int64 // start offset
	offset    int64 // current offset
	maxOffset int64 // maximum offset we are reading to
	tr        *accounting.Transfer
	in        *accounting.Account // input we are reading from
	skipped   int64               // number of bytes we have skipped sequentially
	_closed   bool                // set to true if downloader is closed
	stop      bool                // set to true if we have called _stop()
}

// New makes a downloader for item
func New(item Item, opt *vfscommon.Options, remote string, src fs.Object) (dls *Downloaders) {
	if src == nil {
		panic("internal error: newDownloaders called with nil src object")
	}
	ctx, cancel := context.WithCancel(context.Background())
	dls = &Downloaders{
		ctx:    ctx,
		cancel: cancel,
		item:   item,
		opt:    opt,
		src:    src,
		remote: remote,
	}
	dls.wg.Add(1)
	go func() {
		defer dls.wg.Done()
		// Kick the waiters periodically to pick up errors until
		// the context is cancelled
		ticker := time.NewTicker(backgroundKickerInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				err := dls.kickWaiters()
				if err != nil {
					fs.Errorf(dls.src, "vfs cache: failed to kick waiters: %v", err)
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return dls
}
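
// exampleUsage is an illustrative sketch of the intended calling sequence
// and is not called from anywhere - it assumes the caller already has an
// Item implementation, the VFS options and the source object.
func exampleUsage(item Item, opt *vfscommon.Options, src fs.Object) error {
	dls := New(item, opt, src.Remote(), src)
	// Stop all downloaders and fail any remaining waiters on the way out
	defer func() {
		_ = dls.Close(nil)
	}()
	// Block until the first MiB of the file is in the cache
	return dls.Download(ranges.Range{Pos: 0, Size: 1024 * 1024})
}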

// Accumulate errors for this downloader
//
// It should be called with
//
//	n bytes downloaded
//	err is error from download
//
// call with lock held
func (dls *Downloaders) _countErrors(n int64, err error) {
	if err == nil && n != 0 {
		if dls.errorCount != 0 {
			fs.Infof(dls.src, "vfs cache: downloader: resetting error count to 0")
			dls.errorCount = 0
			dls.lastErr = nil
		}
		return
	}
	if err != nil {
		//if err != syscall.ENOSPC {
		dls.errorCount++
		//}
		dls.lastErr = err
		fs.Infof(dls.src, "vfs cache: downloader: error count now %d: %v", dls.errorCount, err)
	}
}

func (dls *Downloaders) countErrors(n int64, err error) {
	dls.mu.Lock()
	dls._countErrors(n, err)
	dls.mu.Unlock()
}

// Make a new downloader, starting it to download r
//
// call with lock held
func (dls *Downloaders) _newDownloader(r ranges.Range) (dl *downloader, err error) {
	// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)

	dl = &downloader{
		kick:      make(chan struct{}, 1),
		quit:      make(chan struct{}),
		dls:       dls,
		start:     r.Pos,
		offset:    r.Pos,
		maxOffset: r.End(),
	}

	err = dl.open(dl.offset)
	if err != nil {
		_ = dl.close(err)
		return nil, fmt.Errorf("failed to open downloader: %w", err)
	}

	dls.dls = append(dls.dls, dl)

	dl.wg.Add(1)
	go func() {
		defer dl.wg.Done()
		n, err := dl.download()
		_ = dl.close(err)
		dl.dls.countErrors(n, err)
		if err != nil {
			fs.Errorf(dl.dls.src, "vfs cache: failed to download: %v", err)
		}
		err = dl.dls.kickWaiters()
		if err != nil {
			fs.Errorf(dl.dls.src, "vfs cache: failed to kick waiters: %v", err)
		}
	}()

	return dl, nil
}

// _removeClosed() removes any downloaders which are closed.
//
// Call with the mutex held
func (dls *Downloaders) _removeClosed() {
	newDownloaders := dls.dls[:0]
	for _, dl := range dls.dls {
		if !dl.closed() {
			newDownloaders = append(newDownloaders, dl)
		}
	}
	dls.dls = newDownloaders
}

// Close all running downloaders and return any unfulfilled waiters
// with inErr
func (dls *Downloaders) Close(inErr error) (err error) {
	dls.mu.Lock()
	defer dls.mu.Unlock()
	dls._removeClosed()
	for _, dl := range dls.dls {
		dls.mu.Unlock()
		closeErr := dl.stopAndClose(inErr)
		dls.mu.Lock()
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}
	dls.cancel()
	// dls may have entered the periodical (every 5 seconds) kickWaiters() call -
	// unlock the mutex to allow it to finish so that we can get its dls.wg.Done()
	dls.mu.Unlock()
	dls.wg.Wait()
	dls.mu.Lock()
	dls.dls = nil
	dls._dispatchWaiters()
	dls._closeWaiters(inErr)
	return err
}

// Download the range passed in returning when it has been downloaded
// with an error from the downloading goroutine.
func (dls *Downloaders) Download(r ranges.Range) (err error) {
	// defer log.Trace(dls.src, "r=%+v", r)("err=%v", &err)

	dls.mu.Lock()

	errChan := make(chan error)
	waiter := waiter{
		r:       r,
		errChan: errChan,
	}

	err = dls._ensureDownloader(r)
	if err != nil {
		dls.mu.Unlock()
		return err
	}

	dls.waiters = append(dls.waiters, waiter)
	dls.mu.Unlock()
	return <-errChan
}

// close any waiters with the error passed in
//
// call with lock held
func (dls *Downloaders) _closeWaiters(err error) {
	for _, waiter := range dls.waiters {
		waiter.errChan <- err
	}
	dls.waiters = nil
}

// ensure a downloader is running for the range if required. If one isn't found
// then it starts it.
//
// call with lock held
func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
	// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)

	// The window includes potentially unread data in the buffer
	window := int64(fs.GetConfig(context.TODO()).BufferSize)

	// Increase the read range by the read ahead if set
	if dls.opt.ReadAhead > 0 {
		r.Size += int64(dls.opt.ReadAhead)
	}

	// We may be reopening a downloader after a failure here or
	// doing a tentative prefetch so check to see that we haven't
	// read some stuff already.
	//
	// Clip r to stuff which needs downloading
	r = dls.item.FindMissing(r)

	// If the range is entirely present then we only need to start a
	// downloader if the window isn't full.
	startNew := true
	if r.IsEmpty() {
		// Make a new range which includes the window
		rWindow := r
		rWindow.Size += window

		// Clip rWindow to stuff which needs downloading
		rWindowClipped := dls.item.FindMissing(rWindow)

		// If rWindowClipped is empty then don't start a new downloader
		// if there isn't an existing one as there is no data within the
		// window which needs downloading. We do want to kick an
		// existing one though to stop it timing out.
		if rWindowClipped.IsEmpty() {
			// Don't start any more downloaders
			startNew = false
			// Point at the end of the window - this has likely
			// been downloaded already but it will kick the
			// downloader
			r.Pos = rWindow.End()
		} else {
			// Start downloading at the start of the unread window
			r.Pos = rWindowClipped.Pos
		}
		// But don't write anything for the moment
		r.Size = 0
	}

	// If buffer size is less than minWindow then make it that
	if window < minWindow {
		window = minWindow
	}

	var dl *downloader
	// Look through downloaders to find one in range
	// If there isn't one then start a new one
	dls._removeClosed()
	for _, dl = range dls.dls {
		start, offset := dl.getRange()

		// The downloader's offset to offset+window is the gap
		// in which we would like to reuse this
		// downloader. The downloader will never reach before
		// start and offset+window is too far away - we'd
		// rather start another downloader.
		// fs.Debugf(nil, "r=%v start=%d, offset=%d, found=%v", r, start, offset, r.Pos >= start && r.Pos < offset+window)
		if r.Pos >= start && r.Pos < offset+window {
			// Found downloader which will soon have our data
			dl.setRange(r)
			return nil
		}
	}
	if !startNew {
		return nil
	}
	// Size can be 0 here if file shrinks - no need to download
	if r.Size == 0 {
		return nil
	}
	// Downloader not found so start a new one
	_, err = dls._newDownloader(r)
	if err != nil {
		dls._countErrors(0, err)
		return fmt.Errorf("failed to start downloader: %w", err)
	}
	return err
}
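
// exampleReuseDecision restates the reuse test above detached from the
// Downloaders state, for illustration only. A downloader which started at
// start and is currently reading at offset is reused for a request at pos
// if pos lies in [start, offset+window). For example with start=0,
// offset=4 MiB and a 16 MiB window, a request at pos=10 MiB reuses the
// downloader, while one at pos=25 MiB starts a new downloader.
func exampleReuseDecision(pos, start, offset, window int64) bool {
	return pos >= start && pos < offset+window
}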

// EnsureDownloader makes sure a downloader is running for the range
// passed in. If one isn't found then it starts it.
//
// It does not wait for the range to be downloaded
func (dls *Downloaders) EnsureDownloader(r ranges.Range) (err error) {
	dls.mu.Lock()
	defer dls.mu.Unlock()
	return dls._ensureDownloader(r)
}

// _dispatchWaiters() sends any waiters which have completed back to
// their callers.
//
// Call with the mutex held
func (dls *Downloaders) _dispatchWaiters() {
	if len(dls.waiters) == 0 {
		return
	}

	newWaiters := dls.waiters[:0]
	for _, waiter := range dls.waiters {
		// Clip the size against the actual size in case it has shrunk
		r := waiter.r
		r.Clip(dls.src.Size())
		if dls.item.HasRange(r) {
			waiter.errChan <- nil
		} else {
			newWaiters = append(newWaiters, waiter)
		}
	}
	dls.waiters = newWaiters
}

// Send any waiters which have completed back to their callers and make sure
// there is a downloader appropriate for each waiter
func (dls *Downloaders) kickWaiters() (err error) {
	dls.mu.Lock()
	defer dls.mu.Unlock()

	dls._dispatchWaiters()

	if len(dls.waiters) == 0 {
		return nil
	}

	// Make sure each waiter has a downloader
	// This is an O(waiters*Downloaders) algorithm
	// However the number of waiters and the number of downloaders
	// are both expected to be small.
	for _, waiter := range dls.waiters {
		err = dls._ensureDownloader(waiter.r)
		if err != nil {
			// Failures here will be retried by background kicker
			fs.Errorf(dls.src, "vfs cache: restart download failed: %v", err)
		}
	}
	if fserrors.IsErrNoSpace(dls.lastErr) {
		fs.Errorf(dls.src, "vfs cache: cache is out of space %d/%d: last error: %v", dls.errorCount, maxErrorCount, dls.lastErr)
		dls._closeWaiters(dls.lastErr)
		return dls.lastErr
	}

	if dls.errorCount > maxErrorCount {
		fs.Errorf(dls.src, "vfs cache: too many errors %d/%d: last error: %v", dls.errorCount, maxErrorCount, dls.lastErr)
		dls._closeWaiters(dls.lastErr)
		return dls.lastErr
	}

	return nil
}

// Write writes len(p) bytes from p to the underlying data stream. It
// returns the number of bytes written from p (0 <= n <= len(p)) and
// any error encountered that caused the write to stop early. Write
// must return a non-nil error if it returns n < len(p). Write must
// not modify the slice data, even temporarily.
//
// Implementations must not retain p.
func (dl *downloader) Write(p []byte) (n int, err error) {
	// defer log.Trace(dl.dls.src, "p_len=%d", len(p))("n=%d, err=%v", &n, &err)

	// Kick the waiters on exit if some bytes were received
	defer func() {
		if n <= 0 {
			return
		}
		if waitErr := dl.dls.kickWaiters(); waitErr != nil {
			fs.Errorf(dl.dls.src, "vfs cache: download write: failed to kick waiters: %v", waitErr)
			if err == nil {
				err = waitErr
			}
		}
	}()

	dl.mu.Lock()
	defer dl.mu.Unlock()

	// Wait here if we have reached maxOffset until
	// - we are quitting
	// - we get kicked
	// - timeout happens
loop:
	for dl.offset >= dl.maxOffset {
		var timeout = time.NewTimer(maxDownloaderIdleTime)
		dl.mu.Unlock()
		select {
		case <-dl.quit:
			dl.mu.Lock()
			timeout.Stop()
			break loop
		case <-dl.kick:
			dl.mu.Lock()
			timeout.Stop()
		case <-timeout.C:
			// stop any future reading
			dl.mu.Lock()
			if !dl.stop {
				fs.Debugf(dl.dls.src, "vfs cache: stopping download thread as it timed out")
				dl._stop()
			}
			break loop
		}
	}

	n, skipped, err := dl.dls.item.WriteAtNoOverwrite(p, dl.offset)
	if skipped == n {
		dl.skipped += int64(skipped)
	} else {
		dl.skipped = 0
	}
	dl.offset += int64(n)

	// Kill this downloader if skipped too many bytes
	if !dl.stop && dl.skipped > maxSkipBytes {
		fs.Debugf(dl.dls.src, "vfs cache: stopping download thread as it has skipped %d bytes", dl.skipped)
		dl._stop()
	}

	// If running without an async buffer then stop now as
	// StopBuffering has no effect if the Account wasn't buffered
	// so we need to stop manually now rather than wait for the
	// AsyncReader to stop.
	if dl.stop && !dl.in.HasBuffer() {
		err = asyncreader.ErrorStreamAbandoned
	}
	return n, err
}
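
// exampleWaitForKick is a minimal sketch of the wait pattern used in Write
// above, shown without the mutex juggling: block until quit is closed, a
// kick arrives, or the idle timeout fires. For illustration only - it is
// not used by the cache.
func exampleWaitForKick(quit, kick <-chan struct{}) (timedOut bool) {
	timeout := time.NewTimer(maxDownloaderIdleTime)
	defer timeout.Stop()
	select {
	case <-quit: // downloader is quitting
		return false
	case <-kick: // more data has been requested
		return false
	case <-timeout.C: // idle for too long
		return true
	}
}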

// open the file from offset
//
// should be called on a fresh downloader
func (dl *downloader) open(offset int64) (err error) {
	// defer log.Trace(dl.dls.src, "offset=%d", offset)("err=%v", &err)
	dl.tr = accounting.Stats(dl.dls.ctx).NewTransfer(dl.dls.src, nil)

	size := dl.dls.src.Size()
	if size < 0 {
		// FIXME should just completely download these
		return errors.New("can't open unknown sized file")
	}

	// FIXME hashType needs to ignore when --no-checksum is set too? Which is a VFS flag.
	// var rangeOption *fs.RangeOption
	// if offset > 0 {
	// 	rangeOption = &fs.RangeOption{Start: offset, End: size - 1}
	// }
	// in0, err := operations.NewReOpen(dl.dls.ctx, dl.dls.src, ci.LowLevelRetries, dl.dls.item.c.hashOption, rangeOption)

	in0 := chunkedreader.New(context.TODO(), dl.dls.src, int64(dl.dls.opt.ChunkSize), int64(dl.dls.opt.ChunkSizeLimit), dl.dls.opt.ChunkStreams)
	_, err = in0.Seek(offset, 0)
	if err != nil {
		return fmt.Errorf("vfs reader: failed to open source file: %w", err)
	}
	dl.in = dl.tr.Account(dl.dls.ctx, in0).WithBuffer() // account and buffer the transfer

	dl.offset = offset

	// FIXME set mod time
	// FIXME check checksums

	return nil
}

// close the downloader
func (dl *downloader) close(inErr error) (err error) {
	// defer log.Trace(dl.dls.src, "inErr=%v", inErr)("err=%v", &err)
	checkErr := func(e error) {
		if e == nil || errors.Is(e, asyncreader.ErrorStreamAbandoned) {
			return
		}
		err = e
	}
	dl.mu.Lock()
	if dl.in != nil {
		checkErr(dl.in.Close())
		dl.in = nil
	}
	if dl.tr != nil {
		dl.tr.Done(dl.dls.ctx, inErr)
		dl.tr = nil
	}
	dl._closed = true
	dl.mu.Unlock()
	return err
}

// closed returns true if the downloader has been closed already
func (dl *downloader) closed() bool {
	dl.mu.Lock()
	defer dl.mu.Unlock()
	return dl._closed
}

// stop the downloader if running
//
// Call with the mutex held
func (dl *downloader) _stop() {
	// defer log.Trace(dl.dls.src, "")("")

	// exit if have already called _stop
	if dl.stop {
		return
	}
	dl.stop = true

	// Signal quit now to unblock the downloader
	close(dl.quit)

	// stop the downloader by stopping the async reader buffering
	// any more input. This causes all the stuff in the async
	// buffer (which can be many MiB) to be written to the disk
	// before exiting.
	if dl.in != nil {
		dl.in.StopBuffering()
	}
}

// stop the downloader if running then close it with the error passed in
func (dl *downloader) stopAndClose(inErr error) (err error) {
	// Stop the downloader by closing its input
	dl.mu.Lock()
	dl._stop()
	dl.mu.Unlock()
	// wait for downloader to finish...
	// do this without mutex as asyncreader
	// calls back into Write() which needs the lock
	dl.wg.Wait()
	return dl.close(inErr)
}

// Start downloading to the local file starting at offset until maxOffset.
func (dl *downloader) download() (n int64, err error) {
	// defer log.Trace(dl.dls.src, "")("err=%v", &err)
	n, err = dl.in.WriteTo(dl)
	if err != nil && !errors.Is(err, asyncreader.ErrorStreamAbandoned) {
		return n, fmt.Errorf("vfs reader: failed to write to cache file: %w", err)
	}

	return n, nil
}

// setRange makes sure the downloader is downloading the range passed in
func (dl *downloader) setRange(r ranges.Range) {
	// defer log.Trace(dl.dls.src, "r=%v", r)("")
	dl.mu.Lock()
	maxOffset := r.End()
	if maxOffset > dl.maxOffset {
		dl.maxOffset = maxOffset
	}
	dl.mu.Unlock()
	// fs.Debugf(dl.dls.src, "kicking downloader with maxOffset %d", maxOffset)
	select {
	case dl.kick <- struct{}{}:
	default:
	}
}
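
// Note that kick has a buffer of one, so the non-blocking send in setRange
// above coalesces any number of kicks into a single pending wakeup and
// never blocks if the downloader is already busy:
//
//	select {
//	case dl.kick <- struct{}{}:
//	default: // a kick is already pending
//	}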

// get the current range this downloader is working on
func (dl *downloader) getRange() (start, offset int64) {
	dl.mu.Lock()
	defer dl.mu.Unlock()
	return dl.start, dl.offset
}