fstest: let backends advertise maximum file size

This commit is contained in:
Ivan Andreev 2019-09-16 20:52:41 +03:00 committed by Nick Craig-Wood
parent 1e4691f951
commit 661dc568f3
6 changed files with 34 additions and 2 deletions

View File

@ -115,6 +115,9 @@ func TestMultithreadCopy(t *testing.T) {
{size: multithreadChunkSize*2 + 1, streams: 2}, {size: multithreadChunkSize*2 + 1, streams: 2},
} { } {
t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) { t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
if *fstest.SizeLimit > 0 && int64(test.size) > *fstest.SizeLimit {
t.Skipf("exceeded file size limit %d > %d", test.size, *fstest.SizeLimit)
}
var err error var err error
contents := random.String(test.size) contents := random.String(test.size)
t1 := fstest.Time("2001-02-03T04:05:06.499999999Z") t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")

View File

@ -445,6 +445,15 @@ func TestRcat(t *testing.T) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
if *fstest.SizeLimit > 0 && int64(fs.Config.StreamingUploadCutoff) > *fstest.SizeLimit {
savedCutoff := fs.Config.StreamingUploadCutoff
defer func() {
fs.Config.StreamingUploadCutoff = savedCutoff
}()
fs.Config.StreamingUploadCutoff = fs.SizeSuffix(*fstest.SizeLimit)
t.Logf("Adjust StreamingUploadCutoff to size limit %s (was %s)", fs.Config.StreamingUploadCutoff, savedCutoff)
}
fstest.CheckListing(t, r.Fremote, []fstest.Item{}) fstest.CheckListing(t, r.Fremote, []fstest.Item{})
data1 := "this is some really nice test data" data1 := "this is some really nice test data"

View File

@ -42,6 +42,8 @@ var (
Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower") Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries") LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
UseListR = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.") UseListR = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
// SizeLimit signals tests to limit the maximum test file size and skip inappropriate runs
SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size")
// ListRetries is the number of times to retry a listing to overcome eventual consistency // ListRetries is the number of times to retry a listing to overcome eventual consistency
ListRetries = flag.Int("list-retries", 6, "Number or times to retry listing") ListRetries = flag.Int("list-retries", 6, "Number or times to retry listing")
// MatchTestRemote matches the remote names used for testing // MatchTestRemote matches the remote names used for testing

View File

@ -557,9 +557,14 @@ func Run(t *testing.T, opt *Opt) {
t.Run("FsPutError", func(t *testing.T) { t.Run("FsPutError", func(t *testing.T) {
skipIfNotOk(t) skipIfNotOk(t)
const N = 5 * 1024 var N int64 = 5 * 1024
if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit {
N = *fstest.SizeLimit
t.Logf("Reduce file size due to limit %d", N)
}
// Read N bytes then produce an error // Read N bytes then produce an error
contents := random.String(N) contents := random.String(int(N))
buf := bytes.NewBufferString(contents) buf := bytes.NewBufferString(contents)
er := &errorReader{errors.New("potato")} er := &errorReader{errors.New("potato")}
in := io.MultiReader(buf, er) in := io.MultiReader(buf, er)

View File

@ -34,6 +34,7 @@ type Backend struct {
FastList bool // set to test with -fast-list FastList bool // set to test with -fast-list
Short bool // set to test with -short Short bool // set to test with -short
OneOnly bool // set to run only one backend test at once OneOnly bool // set to run only one backend test at once
MaxFile string // file size limit
Ignore []string // test names to ignore the failure of Ignore []string // test names to ignore the failure of
Tests []string // paths of tests to run, blank for all Tests []string // paths of tests to run, blank for all
} }
@ -60,6 +61,12 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
if !b.includeTest(t) { if !b.includeTest(t) {
return runs return runs
} }
maxSize := fs.SizeSuffix(0)
if b.MaxFile != "" {
if err := maxSize.Set(b.MaxFile); err != nil {
log.Printf("Invalid maxfile value %q: %v", b.MaxFile, err)
}
}
fastlists := []bool{false} fastlists := []bool{false}
if b.FastList && t.FastList { if b.FastList && t.FastList {
fastlists = append(fastlists, true) fastlists = append(fastlists, true)
@ -81,6 +88,7 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
NoRetries: t.NoRetries, NoRetries: t.NoRetries,
OneOnly: b.OneOnly, OneOnly: b.OneOnly,
NoBinary: t.NoBinary, NoBinary: t.NoBinary,
SizeLimit: int64(maxSize),
Ignore: ignore, Ignore: ignore,
} }
if t.AddBackend { if t.AddBackend {

View File

@ -16,6 +16,7 @@ import (
"regexp" "regexp"
"runtime" "runtime"
"sort" "sort"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -43,6 +44,7 @@ type Run struct {
NoRetries bool // don't retry if set NoRetries bool // don't retry if set
OneOnly bool // only run test for this backend at once OneOnly bool // only run test for this backend at once
NoBinary bool // set to not build a binary NoBinary bool // set to not build a binary
SizeLimit int64 // maximum test file size
Ignore map[string]struct{} Ignore map[string]struct{}
// Internals // Internals
cmdLine []string cmdLine []string
@ -339,6 +341,9 @@ func (r *Run) Init() {
if r.Short { if r.Short {
r.cmdLine = append(r.cmdLine, "-short") r.cmdLine = append(r.cmdLine, "-short")
} }
if r.SizeLimit > 0 {
r.cmdLine = append(r.cmdLine, "-size-limit", strconv.FormatInt(r.SizeLimit, 10))
}
r.cmdString = toShell(r.cmdLine) r.cmdString = toShell(r.cmdLine)
} }