Mirror of https://github.com/rclone/rclone.git (synced 2024-11-29 03:48:27 +08:00)
Fix integration tests so they can be run independently and out of order - fixes #291
* Make all integration tests start with an empty remote
* Add an -individual flag so this can be a different bucket/container/directory
* Fix up tests after changing the hashers
* Add sha1sum test
* Make directory checking in tests sleep more to fix acd inconsistencies
* Factor integration tests to make more maintainable
* Ensure remote writes have a fstest.CheckItems() before use
  * this fixes eventual consistency on the directory listings later
* Call fs.Stats.ResetCounters() before every fs.Sync()

Note that the tests shouldn't be run concurrently as fs.Config is global state.
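To make the per-test discipline above concrete, here is a minimal sketch (not rclone's actual test code) of how an integration test would check a write, reset the global stats before the sync, and verify the remote's contents afterwards. fRemote, fLocal and putFile are placeholder names, and the import paths assume the github.com/ncw/rclone layout of this era.

package sketch

import (
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest"
)

// fRemote and fLocal stand in for the fs.Fs instances a real harness would
// open: the remote under test and a local scratch directory.
var fRemote, fLocal fs.Fs

// putFile is a hypothetical helper that writes the item's content to f.
func putFile(t *testing.T, f fs.Fs, item fstest.Item) {}

func TestSyncSketch(t *testing.T) {
	file1 := fstest.NewItem("hello.txt", "hello world", fstest.Time("2011-12-25T12:59:59.123456789Z"))
	putFile(t, fLocal, file1)

	// Check the write really landed before depending on the listing - this
	// is the fstest.CheckItems() call that guards against eventual
	// consistency in later directory listings.
	fstest.CheckItems(t, fLocal, file1)

	// Reset the global counters before every Sync so errors from earlier
	// operations don't leak into this test's result.
	fs.Stats.ResetCounters()
	if err := fs.Sync(fRemote, fLocal); err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	// The remote should now contain exactly the items written.
	fstest.CheckItems(t, fRemote, file1)
}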
This commit is contained in:
parent ccba859812
commit eda4130703
@@ -555,6 +555,7 @@ func MoveDir(fdst, fsrc Fs) error {
 
 // Check the files in fsrc and fdst according to Size and hash
 func Check(fdst, fsrc Fs) error {
+	differences := int32(0)
 	var (
 		wg                 sync.WaitGroup
 		dstFiles, srcFiles map[string]Object
@@ -597,12 +598,14 @@ func Check(fdst, fsrc Fs) error {
 	for _, dst := range dstFiles {
 		Stats.Error()
 		ErrorLog(dst, "File not in %v", fsrc)
+		atomic.AddInt32(&differences, 1)
 	}
 
 	Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
 	for _, src := range srcFiles {
 		Stats.Error()
 		ErrorLog(src, "File not in %v", fdst)
+		atomic.AddInt32(&differences, 1)
 	}
 
 	checks := make(chan []Object, Config.Transfers)
@@ -625,6 +628,7 @@ func Check(fdst, fsrc Fs) error {
 				Stats.DoneChecking(src)
 				Stats.Error()
 				ErrorLog(src, "Sizes differ")
+				atomic.AddInt32(&differences, 1)
 				continue
 			}
 			same, _, err := CheckHashes(src, dst)
@@ -634,6 +638,7 @@ func Check(fdst, fsrc Fs) error {
 			}
 			if !same {
 				Stats.Error()
+				atomic.AddInt32(&differences, 1)
 				ErrorLog(src, "Md5sums differ")
 			}
 			Debug(src, "OK")
@@ -644,8 +649,8 @@ func Check(fdst, fsrc Fs) error {
 	Log(fdst, "Waiting for checks to finish")
 	checkerWg.Wait()
 	Log(fdst, "%d differences found", Stats.GetErrors())
-	if Stats.GetErrors() > 0 {
-		return fmt.Errorf("%d differences found", Stats.GetErrors())
+	if differences > 0 {
+		return fmt.Errorf("%d differences found", differences)
 	}
 	return nil
 }
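The Check() change above stops deriving the function's return value from the global Stats error count and instead tallies its own differences counter, incremented with atomic.AddInt32 because the size and hash comparisons run in multiple checker goroutines. A standalone sketch of that pattern, using only the standard library and hypothetical names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// checkAll fans items out to several workers and counts how many fail the
// check, mirroring the local differences counter added to Check() above.
func checkAll(items []string, workers int, check func(string) bool) error {
	differences := int32(0)
	in := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range in {
				if !check(item) {
					// Safe to increment from any goroutine.
					atomic.AddInt32(&differences, 1)
				}
			}
		}()
	}
	for _, item := range items {
		in <- item
	}
	close(in)
	wg.Wait()
	if differences > 0 {
		return fmt.Errorf("%d differences found", differences)
	}
	return nil
}

func main() {
	err := checkAll([]string{"a", "b", "c"}, 2, func(s string) bool { return s != "b" })
	fmt.Println(err) // 1 differences found
}

Returning the local counter rather than Stats.GetErrors() means a single Check call reports only its own differences, even when other operations have already bumped the global error count.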
File diff suppressed because it is too large
@@ -4,6 +4,8 @@ package fstest
 // FIXME put name of test FS in Fs structure
 
 import (
+	"bytes"
+	"io"
 	"io/ioutil"
 	"log"
 	"math/rand"
@@ -31,6 +33,23 @@ type Item struct {
 	WinPath string
 }
 
+// NewItem creates an item from a string content
+func NewItem(Path, Content string, modTime time.Time) Item {
+	i := Item{
+		Path:    Path,
+		ModTime: modTime,
+		Size:    int64(len(Content)),
+	}
+	hash := fs.NewMultiHasher()
+	buf := bytes.NewBufferString(Content)
+	_, err := io.Copy(hash, buf)
+	if err != nil {
+		log.Fatalf("Failed to create item: %v", err)
+	}
+	i.Hashes = hash.Sums()
+	return i
+}
+
 // CheckTimeEqualWithPrecision checks the times are equal within the
 // precision, returns the delta and a flag
 func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
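NewItem, added above, derives the item's Size and Hashes from the literal content string, so expected fixtures can be declared inline in tests. A small assumed usage (import path as per the layout of this era; the exact hash types depend on what the multi-hasher supports):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fstest"
)

func main() {
	item := fstest.NewItem("hello.txt", "hello world", fstest.Time("2012-02-29T12:00:00.000000000Z"))
	fmt.Println(item.Path)   // hello.txt
	fmt.Println(item.Size)   // 11, the length of "hello world"
	fmt.Println(item.Hashes) // one checksum per supported hash type, e.g. MD5 and SHA-1
}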
@@ -129,17 +148,26 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision ti
 	is := NewItems(items)
 	oldErrors := fs.Stats.GetErrors()
 	var objs []fs.Object
-	const retries = 10
+	const retries = 6
+	sleep := time.Second / 2
 	for i := 1; i <= retries; i++ {
 		objs = nil
 		for obj := range f.List() {
 			objs = append(objs, obj)
 		}
 		if len(objs) == len(items) {
+			// Put an extra sleep in if we did any retries just to make sure it really
+			// is consistent (here is looking at you Amazon Cloud Drive!)
+			if i != 1 {
+				extraSleep := 5*time.Second + sleep
+				t.Logf("Sleeping for %v just to make sure", extraSleep)
+				time.Sleep(extraSleep)
+			}
 			break
 		}
-		t.Logf("Sleeping for 1 second for list eventual consistency: %d/%d", i, retries)
-		time.Sleep(1 * time.Second)
+		sleep *= 2
+		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
+		time.Sleep(sleep)
 	}
 	for _, obj := range objs {
 		if obj == nil {
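The retry loop above replaces ten fixed one-second waits with an exponential backoff: the sleep starts at half a second and doubles after every listing that comes back with the wrong number of objects, plus an extra confirmation sleep once a retried listing finally looks right. A worked example of the resulting schedule, assuming every attempt fails:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the new backoff: retries = 6, sleep starts at 0.5s and is
	// doubled before each wait.
	const retries = 6
	sleep := time.Second / 2
	total := time.Duration(0)
	for i := 1; i <= retries; i++ {
		sleep *= 2
		fmt.Printf("attempt %d failed, sleeping %v\n", i, sleep)
		total += sleep
	}
	fmt.Println("worst-case total wait:", total) // 1m3s, versus 10s with the old fixed retries
}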
@@ -161,6 +189,12 @@ func CheckListing(t *testing.T, f fs.Fs, items []Item) {
 	CheckListingWithPrecision(t, f, items, precision)
 }
 
+// CheckItems checks the fs to see if it has only the items passed in
+// using a precision of fs.Config.ModifyWindow
+func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
+	CheckListingWithPrecision(t, f, items, fs.Config.ModifyWindow)
+}
+
 // Time parses a time string or logs a fatal error
 func Time(timeString string) time.Time {
 	t, err := time.Parse(time.RFC3339Nano, timeString)