build: fix errors spotted by ineffassign linter

Most of these were caused by shadowing err, and a good fraction of them
will have caused errors not to be propagated properly.
This commit is contained in:
Nick Craig-Wood 2018-05-04 15:19:50 +01:00
parent 790a8a9aed
commit cb5bd47e61
19 changed files with 37 additions and 22 deletions

View File

@ -357,7 +357,8 @@ outer:
buf := up.f.getUploadBlock()
// Read the chunk
n, err := io.ReadFull(up.in, buf)
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
@ -366,7 +367,6 @@ outer:
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
hasMoreParts = false
err = nil
break outer
} else if err != nil {

View File

@ -647,7 +647,6 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// search for entry in source or temp fs
var obj fs.Object
err = nil
if f.tempWritePath != "" {
obj, err = f.tempFs.NewObject(remote)
// not found in temp fs

View File

@ -988,8 +988,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)

View File

@ -446,7 +446,6 @@ func (w *worker) run() {
continue
}
}
err = nil
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
@ -493,7 +492,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
}
data = make([]byte, chunkEnd-chunkStart)
sourceRead := 0
var sourceRead int
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)

View File

@ -151,6 +151,7 @@ func TestOpen(t *testing.T) {
fd, err := o.Open()
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
@ -158,6 +159,7 @@ func TestOpen(t *testing.T) {
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}

View File

@ -593,7 +593,6 @@ func (o *Object) Hash(r hash.Type) (string, error) {
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
hashes = make(map[hash.Type]string)
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")

View File

@ -44,9 +44,11 @@ func TestUpdatingCheck(t *testing.T) {
}
fi, err := fd.Stat()
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,
in: wrappedFd,

View File

@ -391,7 +391,8 @@ func (f *Fs) list(dir string, recurse bool, fn addEntryFn) error {
err = fn(d)
} else {
// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
o, err := f.newObjectWithInfo(remote, object)
var o fs.Object
o, err = f.newObjectWithInfo(remote, object)
if err != nil {
return err
}
@ -847,7 +848,7 @@ func urlEncode(str string) string {
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
// Create the segmentsContainer if it doesn't exist
var err error = swift.ContainerNotFound
var err error
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
if err == swift.ContainerNotFound {
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)

View File

@ -149,6 +149,9 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
log.Printf("Fetching asset info for %q from %q", project, url)
user, pass := os.Getenv("GITHUB_USER"), os.Getenv("GITHUB_TOKEN")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
log.Fatalf("Failed to make http request %q: %v", url, err)
}
if user != "" && pass != "" {
log.Printf("Fetching using GITHUB_USER and GITHUB_TOKEN")
req.SetBasicAuth(user, pass)

View File

@ -52,6 +52,9 @@ Print cache stats for a remote in JSON format
}
}
m, err := fsCache.Stats()
if err != nil {
return err
}
raw, err := json.MarshalIndent(m, "", " ")
if err != nil {

View File

@ -350,7 +350,6 @@ func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
}
n, err := handle.ReadAt(buff, ofst)
if err == io.EOF {
err = nil
} else if err != nil {
return translateError(err)
}

View File

@ -76,7 +76,7 @@ func TestMemoryFs(t *testing.T) {
assert.Error(t, err)
err = f.Rmdir("dir")
assert.Error(t, fs.ErrorDirNotFound)
assert.Equal(t, fs.ErrorDirNotFound, err)
}
func TestMemoryObject(t *testing.T) {

View File

@ -288,9 +288,9 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
case DeduplicateRename:
dedupeRename(remote, objs)
case DeduplicateLargest:
size, largest, largestIndex := int64(0), int64(-1), -1
largest, largestIndex := int64(-1), -1
for i, obj := range objs {
size = obj.Size()
size := obj.Size()
if size > largest {
largest, largestIndex = size, i
}

View File

@ -90,7 +90,7 @@ func test(fileName string, flags int, mode string) {
_, readExistingErr = f.Read(buf)
// write some bytes
n, writeExistingErr = f.Write([]byte("HEL"))
_, writeExistingErr = f.Write([]byte("HEL"))
// close
err = f.Close()

View File

@ -105,7 +105,7 @@ func TestReadFileHandleSeek(t *testing.T) {
assert.Equal(t, "d", readString(t, fh, 1))
// Seek off the end
n, err = fh.Seek(100, io.SeekStart)
_, err = fh.Seek(100, io.SeekStart)
assert.NoError(t, err)
// Get the error on read
@ -168,7 +168,7 @@ func TestReadFileHandleReadAt(t *testing.T) {
// check noSeek gives an error
fh.noSeek = true
n, err = fh.ReadAt(buf, 100)
_, err = fh.ReadAt(buf, 100)
assert.Equal(t, ESPIPE, err)
// Properly close the file
@ -176,7 +176,7 @@ func TestReadFileHandleReadAt(t *testing.T) {
// check reading on closed file
fh.noSeek = true
n, err = fh.ReadAt(buf, 100)
_, err = fh.ReadAt(buf, 100)
assert.Equal(t, ECLOSED, err)
}

View File

@ -116,7 +116,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
if o != nil && fh.file.rwOpens() == 0 {
cacheObj, err := fh.d.vfs.cache.f.NewObject(fh.remote)
if err == nil && cacheObj != nil {
cacheObj, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.remote, o)
_, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.remote, o)
if err != nil {
return errors.Wrap(err, "open RW handle failed to update cached file")
}

View File

@ -155,7 +155,7 @@ func TestRWFileHandleSeek(t *testing.T) {
assert.Equal(t, "d", rwReadString(t, fh, 1))
// Seek off the end
n, err = fh.Seek(100, io.SeekStart)
_, err = fh.Seek(100, io.SeekStart)
assert.NoError(t, err)
// Get the error on read
@ -215,7 +215,7 @@ func TestRWFileHandleReadAt(t *testing.T) {
assert.NoError(t, fh.Close())
// check reading on closed file
n, err = fh.ReadAt(buf, 100)
_, err = fh.ReadAt(buf, 100)
assert.Equal(t, ECLOSED, err)
}
@ -351,6 +351,7 @@ func TestRWFileHandleMethodsWrite(t *testing.T) {
// check vfs
root, err := vfs.Root()
require.NoError(t, err)
checkListing(t, root, []string{"file1,11,false"})
// check the underlying r.Fremote but not the modtime
@ -399,6 +400,7 @@ func TestRWFileHandleWriteAt(t *testing.T) {
// check vfs
root, err := vfs.Root()
require.NoError(t, err)
checkListing(t, root, []string{"file1,11,false"})
// check the underlying r.Fremote but not the modtime
@ -427,6 +429,7 @@ func TestRWFileHandleWriteNoWrite(t *testing.T) {
// check vfs
root, err := vfs.Root()
require.NoError(t, err)
checkListing(t, root, []string{"file1,0,false", "file2,0,false"})
// check the underlying r.Fremote but not the modtime

View File

@ -187,10 +187,10 @@ func TestVFSStatParent(t *testing.T) {
assert.Equal(t, "/", node.Name())
assert.Equal(t, "not found", leaf)
node, leaf, err = vfs.StatParent("not found dir/not found")
_, _, err = vfs.StatParent("not found dir/not found")
assert.Equal(t, os.ErrNotExist, err)
node, leaf, err = vfs.StatParent("file1/under a file")
_, _, err = vfs.StatParent("file1/under a file")
assert.Equal(t, os.ErrExist, err)
}

View File

@ -85,6 +85,7 @@ func TestWriteFileHandleMethods(t *testing.T) {
// check vfs
root, err := vfs.Root()
require.NoError(t, err)
checkListing(t, root, []string{"file1,5,false"})
// check the underlying r.Fremote but not the modtime
@ -101,6 +102,7 @@ func TestWriteFileHandleMethods(t *testing.T) {
// Check trying to open the file and writing it now it exists
// returns an error
h, err = vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
require.NoError(t, err)
_, err = h.Write([]byte("hello1"))
require.Equal(t, EPERM, err)
assert.NoError(t, h.Close())
@ -115,6 +117,7 @@ func TestWriteFileHandleMethods(t *testing.T) {
// Check opening the file with O_TRUNC and writing does work
h, err = vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
require.NoError(t, err)
_, err = h.WriteString("hello12")
require.NoError(t, err)
assert.NoError(t, h.Close())
@ -159,6 +162,7 @@ func TestWriteFileHandleWriteAt(t *testing.T) {
// check vfs
root, err := vfs.Root()
require.NoError(t, err)
checkListing(t, root, []string{"file1,11,false"})
// check the underlying r.Fremote but not the modtime