parent e3fe31f7cb
commit 1fb6ad700f

@@ -19,7 +19,7 @@ var (
 )
 
 func prepare(t *testing.T, root string) {
-    config.LoadConfig()
+    config.LoadConfig(context.Background())
 
     // Configure the remote
     config.FileSet(remoteName, "type", "alias")
@@ -1182,7 +1182,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
                tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
                err = f.deleteByID(ctx, object.ID, object.Name)
                checkErr(err)
-               tr.Done(err)
+               tr.Done(ctx, err)
            }
        }()
    }
@@ -1210,7 +1210,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
                toBeDeleted <- object
            }
            last = remote
-           tr.Done(nil)
+           tr.Done(ctx, nil)
        }
        return nil
    }))
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
    ts := httptest.NewServer(handler)
 
    // Configure the remote
-   config.LoadConfig()
+   config.LoadConfig(context.Background())
    // fs.Config.LogLevel = fs.LogLevelDebug
    // fs.Config.DumpHeaders = true
    // fs.Config.DumpBodies = true
@@ -41,7 +41,7 @@ func startServer(t *testing.T, f fs.Fs) {
 }
 
 func TestInit(t *testing.T) {
-   config.LoadConfig()
+   config.LoadConfig(context.Background())
 
    f, err := fs.NewFs(context.Background(), "testdata/files")
    l, _ := f.List(context.Background(), "")
@@ -294,7 +294,7 @@ func (d *Driver) ListDir(path string, callback func(ftp.FileInfo) error) (err er
    // Account the transfer
    tr := accounting.GlobalStats().NewTransferRemoteSize(path, node.Size())
    defer func() {
-       tr.Done(err)
+       tr.Done(d.s.ctx, err)
    }()
 
    for _, file := range dirEntries {
@@ -392,7 +392,7 @@ func (d *Driver) GetFile(path string, offset int64) (size int64, fr io.ReadClose
 
    // Account the transfer
    tr := accounting.GlobalStats().NewTransferRemoteSize(path, node.Size())
-   defer tr.Done(nil)
+   defer tr.Done(d.s.ctx, nil)
 
    return node.Size(), handle, nil
 }
@@ -206,7 +206,7 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 
    // Account the transfer
    tr := accounting.Stats(r.Context()).NewTransfer(obj)
-   defer tr.Done(nil)
+   defer tr.Done(r.Context(), nil)
    // FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer
 
    // Serve the file
@@ -60,7 +60,7 @@ var (
 
 func TestInit(t *testing.T) {
    // Configure the remote
-   config.LoadConfig()
+   config.LoadConfig(context.Background())
    // fs.Config.LogLevel = fs.LogLevelDebug
    // fs.Config.DumpHeaders = true
    // fs.Config.DumpBodies = true
@@ -225,7 +225,7 @@ const (
 func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
    // Account the transfer
    tr := accounting.Stats(r.Context()).NewTransferRemoteSize(d.DirRemote, -1)
-   defer tr.Done(nil)
+   defer tr.Done(r.Context(), nil)
 
    fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
 
@@ -77,7 +77,7 @@ func Object(w http.ResponseWriter, r *http.Request, o fs.Object) {
    }
    tr := accounting.Stats(r.Context()).NewTransfer(o)
    defer func() {
-       tr.Done(err)
+       tr.Done(r.Context(), err)
    }()
    in := tr.Account(r.Context(), file) // account the transfer (no buffering)
 
@@ -28,8 +28,9 @@ var (
 )
 
 func TestNewAccountSizeName(t *testing.T) {
+   ctx := context.Background()
    in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
-   stats := NewStats()
+   stats := NewStats(ctx)
    acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
    assert.Equal(t, in, acc.in)
    assert.Equal(t, acc, stats.inProgress.get("test"))
@@ -42,10 +43,11 @@ func TestNewAccountSizeName(t *testing.T) {
 }
 
 func TestAccountWithBuffer(t *testing.T) {
+   ctx := context.Background()
    in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
 
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, -1, "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, -1, "test")
    assert.False(t, acc.HasBuffer())
    acc.WithBuffer()
    assert.True(t, acc.HasBuffer())
@@ -54,7 +56,7 @@ func TestAccountWithBuffer(t *testing.T) {
    require.True(t, ok)
    assert.NoError(t, acc.Close())
 
-   acc = newAccountSizeName(context.Background(), stats, in, 1, "test")
+   acc = newAccountSizeName(ctx, stats, in, 1, "test")
    acc.WithBuffer()
    // should not have a buffer for a small size
    _, ok = acc.in.(*asyncreader.AsyncReader)
@@ -63,11 +65,12 @@ func TestAccountWithBuffer(t *testing.T) {
 }
 
 func TestAccountGetUpdateReader(t *testing.T) {
+   ctx := context.Background()
    test := func(doClose bool) func(t *testing.T) {
        return func(t *testing.T) {
            in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
-           stats := NewStats()
-           acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
+           stats := NewStats(ctx)
+           acc := newAccountSizeName(ctx, stats, in, 1, "test")
 
            assert.Equal(t, in, acc.GetReader())
            assert.Equal(t, acc, stats.inProgress.get("test"))
@@ -78,7 +81,7 @@ func TestAccountGetUpdateReader(t *testing.T) {
            }
 
            in2 := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
-           acc.UpdateReader(context.Background(), in2)
+           acc.UpdateReader(ctx, in2)
 
            assert.Equal(t, in2, acc.GetReader())
            assert.Equal(t, acc, stats.inProgress.get("test"))
@@ -91,9 +94,10 @@ func TestAccountGetUpdateReader(t *testing.T) {
 }
 
 func TestAccountRead(t *testing.T) {
+   ctx := context.Background()
    in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, 1, "test")
 
    assert.True(t, acc.values.start.IsZero())
    acc.values.mu.Lock()
@@ -128,13 +132,14 @@ func TestAccountRead(t *testing.T) {
 }
 
 func testAccountWriteTo(t *testing.T, withBuffer bool) {
+   ctx := context.Background()
    buf := make([]byte, 2*asyncreader.BufferSize+1)
    for i := range buf {
        buf[i] = byte(i % 251)
    }
    in := ioutil.NopCloser(bytes.NewBuffer(buf))
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, int64(len(buf)), "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, int64(len(buf)), "test")
    if withBuffer {
        acc = acc.WithBuffer()
    }
@@ -172,9 +177,10 @@ func TestAccountWriteToWithBuffer(t *testing.T) {
 }
 
 func TestAccountString(t *testing.T) {
+   ctx := context.Background()
    in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, 3, "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, 3, "test")
 
    // FIXME not an exhaustive test!
 
@@ -192,9 +198,10 @@ func TestAccountString(t *testing.T) {
 
 // Test the Accounter interface methods on Account and accountStream
 func TestAccountAccounter(t *testing.T) {
+   ctx := context.Background()
    in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, 3, "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, 3, "test")
 
    assert.True(t, in == acc.OldStream())
 
@@ -250,6 +257,7 @@ func TestAccountAccounter(t *testing.T) {
 }
 
 func TestAccountMaxTransfer(t *testing.T) {
+   ctx := context.Background()
    old := fs.Config.MaxTransfer
    oldMode := fs.Config.CutoffMode
 
@@ -260,8 +268,8 @@ func TestAccountMaxTransfer(t *testing.T) {
    }()
 
    in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, 1, "test")
 
    var b = make([]byte, 10)
 
@@ -277,8 +285,8 @@ func TestAccountMaxTransfer(t *testing.T) {
    assert.True(t, fserrors.IsFatalError(err))
 
    fs.Config.CutoffMode = fs.CutoffModeSoft
-   stats = NewStats()
-   acc = newAccountSizeName(context.Background(), stats, in, 1, "test")
+   stats = NewStats(ctx)
+   acc = newAccountSizeName(ctx, stats, in, 1, "test")
 
    n, err = acc.Read(b)
    assert.Equal(t, 10, n)
@@ -292,6 +300,7 @@ func TestAccountMaxTransfer(t *testing.T) {
 }
 
 func TestAccountMaxTransferWriteTo(t *testing.T) {
+   ctx := context.Background()
    old := fs.Config.MaxTransfer
    oldMode := fs.Config.CutoffMode
 
@@ -302,8 +311,8 @@ func TestAccountMaxTransferWriteTo(t *testing.T) {
    }()
 
    in := ioutil.NopCloser(readers.NewPatternReader(1024))
-   stats := NewStats()
-   acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
+   stats := NewStats(ctx)
+   acc := newAccountSizeName(ctx, stats, in, 1, "test")
 
    var b bytes.Buffer
 
@@ -313,9 +322,10 @@ func TestAccountMaxTransferWriteTo(t *testing.T) {
 }
 
 func TestAccountReadCtx(t *testing.T) {
-   ctx, cancel := context.WithCancel(context.Background())
+   ctx := context.Background()
+   ctx, cancel := context.WithCancel(ctx)
    in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
-   stats := NewStats()
+   stats := NewStats(ctx)
    acc := newAccountSizeName(ctx, stats, in, 1, "test")
 
    var b = make([]byte, 10)
@@ -1,6 +1,7 @@
 package accounting
 
 import (
+   "context"
    "sync"
 
    "github.com/rclone/rclone/fs"
@@ -13,7 +14,7 @@ type inProgress struct {
 }
 
 // newInProgress makes a new inProgress object
-func newInProgress() *inProgress {
+func newInProgress(ctx context.Context) *inProgress {
    return &inProgress{
        m: make(map[string]*Account, fs.Config.Transfers),
    }
@@ -1,6 +1,8 @@
 package accounting
 
 import (
+   "context"
+
    "github.com/prometheus/client_golang/prometheus"
 )
 
@@ -8,6 +10,7 @@ var namespace = "rclone_"
 
 // RcloneCollector is a Prometheus collector for Rclone
 type RcloneCollector struct {
+   ctx              context.Context
    bytesTransferred *prometheus.Desc
    transferSpeed    *prometheus.Desc
    numOfErrors      *prometheus.Desc
@@ -21,8 +24,9 @@ type RcloneCollector struct {
 }
 
 // NewRcloneCollector make a new RcloneCollector
-func NewRcloneCollector() *RcloneCollector {
+func NewRcloneCollector(ctx context.Context) *RcloneCollector {
    return &RcloneCollector{
+       ctx: ctx,
        bytesTransferred: prometheus.NewDesc(namespace+"bytes_transferred_total",
            "Total transferred bytes since the start of the Rclone process",
            nil, nil,
@@ -82,7 +86,7 @@ func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
 func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
-   s := groups.sum()
+   s := groups.sum(c.ctx)
    s.mu.RLock()
 
    ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
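Note: a minimal sketch of how a caller wires up the collector after this change, mirroring the init() update to the metrics handler later in this commit; the variable names are illustrative:

    ctx := context.Background()
    collector := accounting.NewRcloneCollector(ctx) // Collect aggregates stats via groups.sum(ctx)
    prometheus.MustRegister(collector)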
@@ -2,6 +2,7 @@ package accounting
 
 import (
    "bytes"
+   "context"
    "fmt"
    "sort"
    "strings"
@@ -22,6 +23,7 @@ var startTime = time.Now()
 // StatsInfo accounts all transfers
 type StatsInfo struct {
    mu        sync.RWMutex
+   ctx       context.Context
    bytes     int64
    errors    int64
    lastError error
@@ -49,11 +51,12 @@ type StatsInfo struct {
 }
 
 // NewStats creates an initialised StatsInfo
-func NewStats() *StatsInfo {
+func NewStats(ctx context.Context) *StatsInfo {
    return &StatsInfo{
+       ctx:          ctx,
        checking:     newTransferMap(fs.Config.Checkers, "checking"),
        transferring: newTransferMap(fs.Config.Transfers, "transferring"),
-       inProgress:   newInProgress(),
+       inProgress:   newInProgress(ctx),
    }
 }
 
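Note: after this signature change every StatsInfo needs a context at construction time; a minimal sketch of the new calls, mirroring the test updates elsewhere in this commit (the group name is illustrative):

    ctx := context.Background()
    stats := NewStats(ctx)                // inside package accounting
    grouped := NewStatsGroup(ctx, "job")  // named group, registered in the global statsGroups map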
@@ -332,10 +335,10 @@ func (s *StatsInfo) String() string {
    // Add per transfer stats if required
    if !fs.Config.StatsOneLine {
        if !s.checking.empty() {
-           _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.inProgress, s.transferring))
+           _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring))
        }
        if !s.transferring.empty() {
-           _, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.inProgress, nil))
+           _, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.ctx, s.inProgress, nil))
        }
    }
 
@@ -59,10 +59,10 @@ func rcRemoteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
        return rc.Params{}, err
    }
    if group != "" {
-       return StatsGroup(group).RemoteStats()
+       return StatsGroup(ctx, group).RemoteStats()
    }
 
-   return groups.sum().RemoteStats()
+   return groups.sum(ctx).RemoteStats()
 }
 
 func init() {
@@ -129,9 +129,9 @@ func rcTransferredStats(ctx context.Context, in rc.Params) (rc.Params, error) {
 
    out := make(rc.Params)
    if group != "" {
-       out["transferred"] = StatsGroup(group).Transferred()
+       out["transferred"] = StatsGroup(ctx, group).Transferred()
    } else {
-       out["transferred"] = groups.sum().Transferred()
+       out["transferred"] = groups.sum(ctx).Transferred()
    }
 
    return out, nil
@@ -265,28 +265,28 @@ func Stats(ctx context.Context) *StatsInfo {
    if !ok {
        return GlobalStats()
    }
-   return StatsGroup(group)
+   return StatsGroup(ctx, group)
 }
 
 // StatsGroup gets stats by group name.
-func StatsGroup(group string) *StatsInfo {
+func StatsGroup(ctx context.Context, group string) *StatsInfo {
    stats := groups.get(group)
    if stats == nil {
-       return NewStatsGroup(group)
+       return NewStatsGroup(ctx, group)
    }
    return stats
 }
 
 // GlobalStats returns special stats used for global accounting.
 func GlobalStats() *StatsInfo {
-   return StatsGroup(globalStats)
+   return StatsGroup(context.Background(), globalStats)
 }
 
 // NewStatsGroup creates new stats under named group.
-func NewStatsGroup(group string) *StatsInfo {
-   stats := NewStats()
+func NewStatsGroup(ctx context.Context, group string) *StatsInfo {
+   stats := NewStats(ctx)
    stats.group = group
-   groups.set(group, stats)
+   groups.set(ctx, group, stats)
    return stats
 }
 
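Note: a short sketch of how group stats are looked up after this hunk; the group name is illustrative:

    s := accounting.StatsGroup(ctx, "job-1") // creates the group via NewStatsGroup(ctx, ...) on first use
    global := accounting.GlobalStats()       // signature unchanged; passes context.Background() internally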
@@ -305,7 +305,7 @@ func newStatsGroups() *statsGroups {
 }
 
 // set marks the stats as belonging to a group
-func (sg *statsGroups) set(group string, stats *StatsInfo) {
+func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo) {
    sg.mu.Lock()
    defer sg.mu.Unlock()
 
@@ -343,11 +343,11 @@ func (sg *statsGroups) names() []string {
 }
 
 // sum returns aggregate stats that contains summation of all groups.
-func (sg *statsGroups) sum() *StatsInfo {
+func (sg *statsGroups) sum(ctx context.Context) *StatsInfo {
    sg.mu.Lock()
    defer sg.mu.Unlock()
 
-   sum := NewStats()
+   sum := NewStats(ctx)
    for _, stats := range sg.m {
        stats.mu.RLock()
        {
@@ -1,6 +1,7 @@
 package accounting
 
 import (
+   "context"
    "fmt"
    "runtime"
    "testing"
@@ -11,6 +12,7 @@ import (
 )
 
 func TestStatsGroupOperations(t *testing.T) {
+   ctx := context.Background()
 
    t.Run("empty group returns nil", func(t *testing.T) {
        t.Parallel()
@@ -20,10 +22,10 @@ func TestStatsGroupOperations(t *testing.T) {
 
    t.Run("set assigns stats to group", func(t *testing.T) {
        t.Parallel()
-       stats := NewStats()
+       stats := NewStats(ctx)
        sg := newStatsGroups()
-       sg.set("test", stats)
-       sg.set("test1", stats)
+       sg.set(ctx, "test", stats)
+       sg.set(ctx, "test1", stats)
        if len(sg.m) != len(sg.names()) || len(sg.m) != 2 {
            t.Fatalf("Expected two stats got %d, %d", len(sg.m), len(sg.order))
        }
@@ -31,10 +33,10 @@ func TestStatsGroupOperations(t *testing.T) {
 
    t.Run("get returns correct group", func(t *testing.T) {
        t.Parallel()
-       stats := NewStats()
+       stats := NewStats(ctx)
        sg := newStatsGroups()
-       sg.set("test", stats)
-       sg.set("test1", stats)
+       sg.set(ctx, "test", stats)
+       sg.set(ctx, "test1", stats)
        got := sg.get("test")
        if got != stats {
            t.Fatal("get returns incorrect stats")
@@ -43,20 +45,20 @@ func TestStatsGroupOperations(t *testing.T) {
 
    t.Run("sum returns correct values", func(t *testing.T) {
        t.Parallel()
-       stats1 := NewStats()
+       stats1 := NewStats(ctx)
        stats1.bytes = 5
        stats1.errors = 6
        stats1.oldDuration = time.Second
        stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
-       stats2 := NewStats()
+       stats2 := NewStats(ctx)
        stats2.bytes = 10
        stats2.errors = 12
        stats2.oldDuration = 2 * time.Second
        stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
        sg := newStatsGroups()
-       sg.set("test1", stats1)
-       sg.set("test2", stats2)
-       sum := sg.sum()
+       sg.set(ctx, "test1", stats1)
+       sg.set(ctx, "test2", stats2)
+       sum := sg.sum(ctx)
        assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
        assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
        assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
@@ -70,10 +72,10 @@ func TestStatsGroupOperations(t *testing.T) {
 
    t.Run("delete removes stats", func(t *testing.T) {
        t.Parallel()
-       stats := NewStats()
+       stats := NewStats(ctx)
        sg := newStatsGroups()
-       sg.set("test", stats)
-       sg.set("test1", stats)
+       sg.set(ctx, "test", stats)
+       sg.set(ctx, "test1", stats)
        sg.delete("test1")
        if sg.get("test1") != nil {
            t.Fatal("stats not deleted")
@@ -95,7 +97,7 @@ func TestStatsGroupOperations(t *testing.T) {
        runtime.ReadMemStats(&start)
 
        for i := 0; i < count; i++ {
-           sg.set(fmt.Sprintf("test-%d", i), NewStats())
+           sg.set(ctx, fmt.Sprintf("test-%d", i), NewStats(ctx))
        }
 
        for i := 0; i < count; i++ {
@@ -1,6 +1,7 @@
 package accounting
 
 import (
+   "context"
    "fmt"
    "io"
    "testing"
@@ -67,7 +68,8 @@ func TestPercentage(t *testing.T) {
 }
 
 func TestStatsError(t *testing.T) {
-   s := NewStats()
+   ctx := context.Background()
+   s := NewStats(ctx)
    assert.Equal(t, int64(0), s.GetErrors())
    assert.False(t, s.HadFatalError())
    assert.False(t, s.HadRetryError())
@@ -132,6 +134,7 @@ func TestStatsError(t *testing.T) {
 }
 
 func TestStatsTotalDuration(t *testing.T) {
+   ctx := context.Background()
    startTime := time.Now()
    time1 := startTime.Add(-40 * time.Second)
    time2 := time1.Add(10 * time.Second)
@@ -139,7 +142,7 @@ func TestStatsTotalDuration(t *testing.T) {
    time4 := time3.Add(10 * time.Second)
 
    t.Run("Single completed transfer", func(t *testing.T) {
-       s := NewStats()
+       s := NewStats(ctx)
        tr1 := &Transfer{
            startedAt:   time1,
            completedAt: time2,
@@ -158,7 +161,7 @@ func TestStatsTotalDuration(t *testing.T) {
    })
 
    t.Run("Single uncompleted transfer", func(t *testing.T) {
-       s := NewStats()
+       s := NewStats(ctx)
        tr1 := &Transfer{
            startedAt: time1,
        }
@@ -174,7 +177,7 @@ func TestStatsTotalDuration(t *testing.T) {
    })
 
    t.Run("Overlapping without ending", func(t *testing.T) {
-       s := NewStats()
+       s := NewStats(ctx)
        tr1 := &Transfer{
            startedAt:   time2,
            completedAt: time3,
@@ -218,7 +221,7 @@ func TestStatsTotalDuration(t *testing.T) {
    })
 
    t.Run("Mixed completed and uncompleted transfers", func(t *testing.T) {
-       s := NewStats()
+       s := NewStats(ctx)
        s.AddTransfer(&Transfer{
            startedAt:   time1,
            completedAt: time2,
@@ -382,6 +385,7 @@ func TestTimeRangeDuration(t *testing.T) {
 }
 
 func TestPruneTransfers(t *testing.T) {
+   ctx := context.Background()
    for _, test := range []struct {
        Name      string
        Transfers int
@@ -406,7 +410,7 @@ func TestPruneTransfers(t *testing.T) {
            MaxCompletedTransfers = test.Limit
            defer func() { MaxCompletedTransfers = prevLimit }()
 
-           s := NewStats()
+           s := NewStats(ctx)
            for i := int64(1); i <= int64(test.Transfers); i++ {
                s.AddTransfer(&Transfer{
                    startedAt: time.Unix(i, 0),
@@ -35,7 +35,7 @@ func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
 }
 
 // StartTokenBucket starts the token bucket if necessary
-func StartTokenBucket() {
+func StartTokenBucket(ctx context.Context) {
    currLimitMu.Lock()
    currLimit := fs.Config.BwLimit.LimitAt(time.Now())
    currLimitMu.Unlock()
@@ -51,7 +51,7 @@ func StartTokenBucket() {
 }
 
 // StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
-func StartTokenTicker() {
+func StartTokenTicker(ctx context.Context) {
    // If the timetable has a single entry or was not specified, we don't need
    // a ticker to update the bandwidth.
    if len(fs.Config.BwLimit) <= 1 {
@@ -86,7 +86,7 @@ func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking
 
 // Done ends the transfer.
 // Must be called after transfer is finished to run proper cleanups.
-func (tr *Transfer) Done(err error) {
+func (tr *Transfer) Done(ctx context.Context, err error) {
    if err != nil {
        err = tr.stats.Error(err)
 
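Note: with Done (and Reset, below) now taking a context, the calling pattern used throughout the rest of this commit looks like the following sketch; obj and err are illustrative placeholders:

    tr := accounting.Stats(ctx).NewTransfer(obj)
    defer func() {
        tr.Done(ctx, err) // previously tr.Done(err)
    }()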
@@ -123,7 +123,7 @@ func (tr *Transfer) Done(err error) {
 }
 
 // Reset allows to switch the Account to another transfer method.
-func (tr *Transfer) Reset() {
+func (tr *Transfer) Reset(ctx context.Context) {
    tr.mu.RLock()
    acc := tr.acc
    tr.acc = nil
@@ -1,6 +1,7 @@
 package accounting
 
 import (
+   "context"
    "fmt"
    "sort"
    "strings"
@@ -88,7 +89,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
 
 // String returns string representation of map items excluding any in
 // exclude (if set).
-func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string {
+func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string {
    tm.mu.RLock()
    defer tm.mu.RUnlock()
    stringList := make([]string, 0, len(tm.items))
@@ -4,6 +4,7 @@ package config
 import (
    "bufio"
    "bytes"
+   "context"
    "crypto/rand"
    "crypto/sha256"
    "encoding/base64"
@@ -113,7 +114,7 @@ func init() {
 
 func getConfigData() *goconfig.ConfigFile {
    if configFile == nil {
-       LoadConfig()
+       LoadConfig(context.Background())
    }
    return configFile
 }
@@ -212,7 +213,7 @@ func makeConfigPath() string {
 }
 
 // LoadConfig loads the config file
-func LoadConfig() {
+func LoadConfig(ctx context.Context) {
    // Set RCLONE_CONFIG_DIR for backend config and subprocesses
    _ = os.Setenv("RCLONE_CONFIG_DIR", filepath.Dir(ConfigPath))
 
@@ -229,10 +230,10 @@ func LoadConfig() {
    }
 
    // Start the token bucket limiter
-   accounting.StartTokenBucket()
+   accounting.StartTokenBucket(ctx)
 
    // Start the bandwidth update ticker
-   accounting.StartTokenTicker()
+   accounting.StartTokenTicker(ctx)
 
    // Start the transactions per second limiter
    fshttp.StartHTTPTokenBucket()
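Note: a minimal sketch of the new configuration bootstrap, assuming a top-level context; LoadConfig now forwards it to the bandwidth helpers, as the config tests below also do:

    ctx := context.Background()
    config.LoadConfig(ctx) // internally calls accounting.StartTokenBucket(ctx) and accounting.StartTokenTicker(ctx)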
@@ -2,6 +2,7 @@ package config
 
 import (
    "bytes"
+   "context"
    "fmt"
    "io/ioutil"
    "os"
@@ -15,6 +16,7 @@ import (
 )
 
 func testConfigFile(t *testing.T, configFileName string) func() {
+   ctx := context.Background()
    configKey = nil // reset password
    _ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
    _ = os.Unsetenv("RCLONE_CONFIG_PASS")
@@ -36,7 +38,7 @@ func testConfigFile(t *testing.T, configFileName string) func() {
    fs.Config = &fs.ConfigInfo{}
    configFile = nil
 
-   LoadConfig()
+   LoadConfig(ctx)
    assert.Equal(t, []string{}, getConfigData().GetSectionList())
 
    // Fake a remote
@@ -116,7 +116,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
 func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
    tr := accounting.Stats(ctx).NewCheckingTransfer(src)
    defer func() {
-       tr.Done(err)
+       tr.Done(ctx, err)
    }()
    if sizeDiffers(src, dst) {
        err = errors.Errorf("Sizes differ")
@@ -323,7 +323,7 @@ func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ boo
    }
    tr1 := accounting.Stats(ctx).NewTransfer(dst)
    defer func() {
-       tr1.Done(nil) // error handling is done by the caller
+       tr1.Done(ctx, nil) // error handling is done by the caller
    }()
    in1 = tr1.Account(ctx, in1).WithBuffer() // account and buffer the transfer
 
@@ -333,7 +333,7 @@ func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ boo
    }
    tr2 := accounting.Stats(ctx).NewTransfer(dst)
    defer func() {
-       tr2.Done(nil) // error handling is done by the caller
+       tr2.Done(ctx, nil) // error handling is done by the caller
    }()
    in2 = tr2.Account(ctx, in2).WithBuffer() // account and buffer the transfer
 
@@ -132,7 +132,7 @@ func TestMultithreadCopy(t *testing.T) {
            tr := accounting.GlobalStats().NewTransfer(src)
 
            defer func() {
-               tr.Done(err)
+               tr.Done(ctx, err)
            }()
            dst, err := multiThreadCopy(ctx, r.Flocal, "file1", src, 2, tr)
            require.NoError(t, err)
@@ -359,7 +359,7 @@ func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
 func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
    tr := accounting.Stats(ctx).NewTransfer(src)
    defer func() {
-       tr.Done(err)
+       tr.Done(ctx, err)
    }()
    newDst = dst
    if SkipDestructive(ctx, src, "copy") {
@@ -401,7 +401,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
                _ = in.Close()
            }
            if err == fs.ErrorCantCopy {
-               tr.Reset() // skip incomplete accounting - will be overwritten by the manual copy below
+               tr.Reset(ctx) // skip incomplete accounting - will be overwritten by the manual copy below
            }
        } else {
            err = fs.ErrorCantCopy
@@ -478,7 +478,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
        // Retry if err returned a retry error
        if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
            fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
-           tr.Reset() // skip incomplete accounting - will be overwritten by retry
+           tr.Reset(ctx) // skip incomplete accounting - will be overwritten by retry
            continue
        }
        // otherwise finish
@@ -550,7 +550,7 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
        if err == nil {
            accounting.Stats(ctx).Renames(1)
        }
-       tr.Done(err)
+       tr.Done(ctx, err)
    }()
    newDst = dst
    if SkipDestructive(ctx, src, "move") {
@@ -627,7 +627,7 @@ func SuffixName(remote string) string {
 func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
    tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
    defer func() {
-       tr.Done(err)
+       tr.Done(ctx, err)
    }()
    numDeletes := accounting.Stats(ctx).Deletes(1)
    if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete {
@@ -817,7 +817,7 @@ func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
    return ListFn(ctx, f, func(o fs.Object) {
        tr := accounting.Stats(ctx).NewCheckingTransfer(o)
        defer func() {
-           tr.Done(nil)
+           tr.Done(ctx, nil)
        }()
        modTime := o.ModTime(ctx)
        syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
@@ -850,7 +850,7 @@ func hashSum(ctx context.Context, ht hash.Type, o fs.Object) (string, error) {
    var err error
    tr := accounting.Stats(ctx).NewCheckingTransfer(o)
    defer func() {
-       tr.Done(err)
+       tr.Done(ctx, err)
    }()
    sum, err := o.Hash(ctx, ht)
    if err == hash.ErrUnsupported {
@@ -1058,7 +1058,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
        var err error
        tr := accounting.Stats(ctx).NewTransfer(o)
        defer func() {
-           tr.Done(err)
+           tr.Done(ctx, err)
        }()
        opt := fs.RangeOption{Start: offset, End: -1}
        size := o.Size()
@@ -1100,7 +1100,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
 func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
    tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
    defer func() {
-       tr.Done(err)
+       tr.Done(ctx, err)
    }()
    in = tr.Account(ctx, in).WithBuffer()
 
@@ -1447,7 +1447,7 @@ func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadClo
        // Size known use Put
        tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size)
        defer func() {
-           tr.Done(err)
+           tr.Done(ctx, err)
        }()
        body := ioutil.NopCloser(in) // we let the server close the body
        in := tr.Account(ctx, body)  // account the transfer (no buffering)
@@ -1624,7 +1624,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
        }
        tr := accounting.Stats(ctx).NewTransfer(srcObj)
        defer func() {
-           tr.Done(err)
+           tr.Done(ctx, err)
        }()
        tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
        if err != nil {
@@ -1673,7 +1673,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
        if !cp {
            err = DeleteFile(ctx, srcObj)
        }
-       tr.Done(err)
+       tr.Done(ctx, err)
    }
    return err
 }
@@ -42,7 +42,7 @@ var promHandler http.Handler
 var onlyOnceWarningAllowOrigin sync.Once
 
 func init() {
-   rcloneCollector := accounting.NewRcloneCollector()
+   rcloneCollector := accounting.NewRcloneCollector(context.Background())
    prometheus.MustRegister(rcloneCollector)
    promHandler = promhttp.Handler()
 }
@@ -344,7 +344,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
                }
            }
        }
-       tr.Done(err)
+       tr.Done(s.ctx, err)
    }
 }
 
@@ -749,7 +749,7 @@ func (s *syncCopyMove) makeRenameMap() {
                    s.pushRenameMap(hash, obj)
                }
 
-               tr.Done(nil)
+               tr.Done(s.ctx, nil)
            }
        }
    }()
@@ -58,6 +58,7 @@ func init() {
 
 // Initialise rclone for testing
 func Initialise() {
+   ctx := context.Background()
    // Never ask for passwords, fail instead.
    // If your local config is encrypted set environment variable
    // "RCLONE_CONFIG_PASS=hunter2" (or your password)
@@ -68,7 +69,7 @@ func Initialise() {
    if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
        config.ConfigPath = envConfig
    }
-   config.LoadConfig()
+   config.LoadConfig(ctx)
    if *Verbose {
        fs.Config.LogLevel = fs.LogLevelDebug
    }
@@ -17,7 +17,7 @@ import (
 // ReadFileHandle is an open for read file handle on a File
 type ReadFileHandle struct {
    baseHandle
-   done   func(err error)
+   done   func(ctx context.Context, err error)
    mu     sync.Mutex
    cond   *sync.Cond // cond lock for out of sequence reads
    closed bool       // set if handle has been closed
@@ -414,7 +414,7 @@ func (fh *ReadFileHandle) close() error {
    if fh.opened {
        var err error
        defer func() {
-           fh.done(err)
+           fh.done(context.TODO(), err)
        }()
        // Close first so that we have hashes
        err = fh.r.Close()
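Note: the done field of ReadFileHandle now expects a context-aware callback; a hypothetical assignment compatible with the new type (the body shown is illustrative, not the actual VFS wiring):

    fh.done = func(ctx context.Context, err error) {
        tr.Done(ctx, err)
    }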
@@ -553,7 +553,7 @@ func (dl *downloader) close(inErr error) (err error) {
        dl.in = nil
    }
    if dl.tr != nil {
-       dl.tr.Done(inErr)
+       dl.tr.Done(dl.dls.ctx, inErr)
        dl.tr = nil
    }
    dl._closed = true