package operations

import (
	"context"
	"errors"
	"io"
	"sync"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
)

// ReOpen is a wrapper for an object reader which reopens the stream on error
type ReOpen struct {
	ctx      context.Context
	mu       sync.Mutex      // mutex to protect the below
	src      fs.Object       // object to open
	options  []fs.OpenOption // options to pass to initial open
	rc       io.ReadCloser   // underlying stream
	read     int64           // number of bytes read from this stream
	maxTries int             // maximum number of retries
	tries    int             // number of retries we've had so far in this stream
	err      error           // if this is set then Read/Close calls will return it
	opened   bool            // if set then rc is valid and needs closing
}

var (
	errorFileClosed   = errors.New("file already closed")
	errorTooManyTries = errors.New("failed to reopen: too many retries")
)

// NewReOpen makes a handle which will reopen itself and seek to where it was on errors
//
// If hashOption is set this will be applied when reading from the start
//
// If rangeOption is set then this will be applied when reading from the
// start, and updated on retries.
func NewReOpen(ctx context.Context, src fs.Object, maxTries int, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	h := &ReOpen{
		ctx:      ctx,
		src:      src,
		maxTries: maxTries,
		options:  options,
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	err = h.open()
	if err != nil {
		return nil, err
	}
	return h, nil
}
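
// Illustrative sketch only (not part of this file's API): one way a caller
// might use NewReOpen so a copy survives transient read errors. The
// copyWithRetries name, destination writer and try count of 3 are assumptions
// invented for this example.
//
//	func copyWithRetries(ctx context.Context, src fs.Object, dst io.Writer) (int64, error) {
//		rc, err := NewReOpen(ctx, src, 3) // allow up to 3 tries of the underlying Open
//		if err != nil {
//			return 0, err
//		}
//		defer func() { _ = rc.Close() }()
//		// failed reads are retried transparently inside rc.Read, so the
//		// caller can treat rc as an ordinary io.ReadCloser
//		return io.Copy(dst, rc)
//	}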

// open the underlying handle - call with lock held
//
// we don't retry here as the Open() call will itself have low level retries
func (h *ReOpen) open() error {
	opts := []fs.OpenOption{}
	var hashOption *fs.HashesOption
	var rangeOption *fs.RangeOption
	for _, option := range h.options {
		switch option := option.(type) {
		case *fs.HashesOption:
			hashOption = option
		case *fs.RangeOption:
			rangeOption = option
		case *fs.HTTPOption:
			opts = append(opts, option)
		default:
			if option.Mandatory() {
				fs.Logf(h.src, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if h.read == 0 {
		if rangeOption != nil {
			opts = append(opts, rangeOption)
		}
		if hashOption != nil {
			// put hashOption on if reading from the start, ditch otherwise
			opts = append(opts, hashOption)
		}
	} else {
		if rangeOption != nil {
			// range to the read point
			opts = append(opts, &fs.RangeOption{Start: rangeOption.Start + h.read, End: rangeOption.End})
		} else {
			// seek to the read point
			opts = append(opts, &fs.SeekOption{Offset: h.read})
		}
	}
	h.tries++
	if h.tries > h.maxTries {
		h.err = errorTooManyTries
	} else {
		h.rc, h.err = h.src.Open(h.ctx, opts...)
	}
	if h.err != nil {
		if h.tries > 1 {
			fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err)
		}
		return h.err
	}
	h.opened = true
	return nil
}
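
// Worked example (illustration only, numbers invented): if the caller passed
// fs.RangeOption{Start: 100, End: 499} and 200 bytes had already been read when
// a reopen happens, open() asks the backend for
// fs.RangeOption{Start: 300, End: 499}; with no range option it would pass
// fs.SeekOption{Offset: 200} instead, so the stream resumes exactly where the
// previous read stopped.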

// Read bytes retrying as necessary
func (h *ReOpen) Read(p []byte) (n int, err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.err != nil {
		// return a previous error if there is one
		return n, h.err
	}
	n, err = h.rc.Read(p)
	if err != nil {
		h.err = err
	}
	h.read += int64(n)
	if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {
		// close underlying stream
		h.opened = false
		_ = h.rc.Close()
		// reopen stream, clearing error if successful
		fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err)
		if h.open() == nil {
			err = nil
		}
	}
	return n, err
}

// Close the stream
func (h *ReOpen) Close() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	if !h.opened {
		return errorFileClosed
	}
	h.opened = false
	h.err = errorFileClosed
	return h.rc.Close()
}