azureblob/b2/dropbox/gcs/koofr/qingstor/s3: fix 0 length files
In 0386d22cc9 we introduced a test that reads 0 length files the way mount does.

This test failed on these backends, which we fix up here.
parent 27a075e9fc
commit e502be475a
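What "read the way mount does" means in practice: mount fills a fixed-size buffer via ReadAt, so a zero-length file must come back as 0 bytes plus io.EOF rather than a backend error. A minimal standalone illustration of that io.ReaderAt contract using bytes.Reader (not rclone code):

package main

import (
    "bytes"
    "fmt"
    "io"
)

func main() {
    buf := make([]byte, 1024)

    // Zero-length "file": ReadAt must report 0 bytes read and io.EOF.
    empty := bytes.NewReader(nil)
    n, err := empty.ReadAt(buf, 0)
    fmt.Println(n, err == io.EOF) // 0 true

    // Short file: all 100 bytes come back, still with io.EOF because
    // the buffer is larger than the file.
    short := bytes.NewReader(make([]byte, 100))
    n, err = short.ReadAt(buf, 0)
    fmt.Println(n, err == io.EOF) // 100 true
}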
@@ -13,6 +13,7 @@ import (
     "encoding/hex"
     "fmt"
     "io"
+    "log"
     "net/http"
     "net/url"
     "path"
@@ -1185,7 +1186,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     if o.AccessTier() == azblob.AccessTierArchive {
         return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
     }
-
+    fs.FixRangeOption(options, o.size)
     for _, option := range options {
         switch x := option.(type) {
         case *fs.RangeOption:
@@ -1205,6 +1206,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     ac := azblob.BlobAccessConditions{}
     var dowloadResponse *azblob.DownloadResponse
     err = o.fs.pacer.Call(func() (bool, error) {
+        log.Printf("offset=%d, count=%v", offset, count)
         dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
         return o.fs.shouldRetry(err)
     })
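The o.fs.pacer.Call wrapper visible in this and the later hunks runs the download closure under rclone's pacer, retrying while shouldRetry reports the error as retryable. A rough self-contained sketch of that call-with-retry shape (hypothetical pacer type, not rclone's fs/pacer package):

package main

import (
    "errors"
    "fmt"
    "time"
)

// pacer is a hypothetical stand-in for the rate-limited retry helper the
// backends use as o.fs.pacer: it runs fn until fn reports "don't retry"
// or the attempt budget is exhausted, sleeping between attempts.
type pacer struct {
    retries int
    sleep   time.Duration
}

// Call keeps invoking fn while it returns (true, err); the last error wins.
func (p *pacer) Call(fn func() (bool, error)) error {
    var err error
    for i := 0; i < p.retries; i++ {
        var again bool
        again, err = fn()
        if !again {
            break
        }
        time.Sleep(p.sleep)
    }
    return err
}

var errTemporary = errors.New("temporary network error")

func main() {
    p := &pacer{retries: 3, sleep: 10 * time.Millisecond}
    attempts := 0
    err := p.Call(func() (bool, error) {
        attempts++
        if attempts < 3 {
            return true, errTemporary // ask for a retry
        }
        return false, nil // success, stop retrying
    })
    fmt.Println(attempts, err) // 3 <nil>
}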
@@ -1531,6 +1531,7 @@ var _ io.ReadCloser = &openFile{}
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+    fs.FixRangeOption(options, o.size)
     opts := rest.Opts{
         Method:  "GET",
         Options: options,
@@ -975,6 +975,7 @@ func (o *Object) Storable() bool {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+    fs.FixRangeOption(options, o.bytes)
     headers := fs.OpenOptionHeaders(options)
     arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
     err = o.fs.pacer.Call(func() (bool, error) {
@@ -966,6 +966,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     if err != nil {
         return nil, err
     }
+    fs.FixRangeOption(options, o.bytes)
     fs.OpenOptionAddHTTPHeaders(req.Header, options)
     var res *http.Response
     err = o.fs.pacer.Call(func() (bool, error) {
@@ -154,6 +154,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
     var sOff, eOff int64 = 0, -1
 
+    fs.FixRangeOption(options, o.Size())
     for _, option := range options {
         switch x := option.(type) {
         case *fs.SeekOption:
@@ -170,13 +171,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
     if sOff == 0 && eOff < 0 {
         return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
     }
-    if sOff < 0 {
-        sOff = o.Size() - eOff
-        eOff = o.Size()
-    }
-    if eOff > o.Size() {
-        eOff = o.Size()
-    }
     span := &koofrclient.FileSpan{
         Start: sOff,
         End:   eOff,
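The seven removed lines are per-backend clamping of the requested byte span; the new fs.FixRangeOption call performs that normalization before the options are inspected, so koofr no longer needs it. A sketch of the idea, modelled on the removed code plus a zero-size guard for the case this commit targets (hypothetical clampSpan helper, not rclone's actual fs.FixRangeOption):

package main

import "fmt"

// clampSpan is a hypothetical helper modelled on the koofr code removed
// above: it resolves a "last eOff bytes" request (sOff < 0), clamps the
// end to the object size, and reports ok == false when the object is
// empty so the caller can skip ranged reads entirely.
func clampSpan(sOff, eOff, size int64) (start, end int64, ok bool) {
    if size == 0 {
        return 0, 0, false // nothing to read; a ranged GET would fail
    }
    if sOff < 0 { // suffix request: read the final eOff bytes
        sOff = size - eOff
        if sOff < 0 {
            sOff = 0
        }
        eOff = size
    }
    if eOff > size {
        eOff = size
    }
    return sOff, eOff, true
}

func main() {
    fmt.Println(clampSpan(0, 2048, 100)) // 0 100 true  (end clamped to size)
    fmt.Println(clampSpan(-1, 10, 100))  // 90 100 true (last 10 bytes)
    fmt.Println(clampSpan(0, -1, 0))     // 0 0 false   (empty object)
}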
@@ -964,6 +964,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 
     key := o.fs.root + o.remote
     req := qs.GetObjectInput{}
+    fs.FixRangeOption(options, o.size)
     for _, option := range options {
         switch option.(type) {
         case *fs.RangeOption, *fs.SeekOption:
@@ -1765,6 +1765,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
         Bucket: &o.fs.bucket,
         Key:    &key,
     }
+    fs.FixRangeOption(options, o.bytes)
     for _, option := range options {
         switch option.(type) {
         case *fs.RangeOption, *fs.SeekOption:
@@ -11,6 +11,7 @@ import (
     "github.com/pkg/errors"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fstest"
+    "github.com/rclone/rclone/lib/random"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -288,12 +289,19 @@ func TestWriteFileModTimeWithOpenWriters(t *testing.T) {
     }
 }
 
-func TestFileZeroLength(t *testing.T) {
+func testFileReadAt(t *testing.T, n int) {
     r := fstest.NewRun(t)
     defer r.Finalise()
     vfs, fh := writeHandleCreate(t, r)
 
-    // Close the file without writing to it
+    contents := []byte(random.String(n))
+    if n != 0 {
+        written, err := fh.Write(contents)
+        require.NoError(t, err)
+        assert.Equal(t, n, written)
+    }
+
+    // Close the file without writing to it if n==0
     err := fh.Close()
     if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
         t.Logf("skipping test: %v", err)
@@ -301,18 +309,27 @@ func TestFileZeroLength(t *testing.T) {
     }
     assert.NoError(t, err)
 
-    // read the 0 length file back in using ReadAt into a buffer
+    // read the file back in using ReadAt into a buffer
     // this simulates what mount does
     rd, err := vfs.OpenFile("file1", os.O_RDONLY, 0)
     require.NoError(t, err)
 
     buf := make([]byte, 1024)
-    n, err := rd.ReadAt(buf, 0)
+    read, err := rd.ReadAt(buf, 0)
     if err != io.EOF {
         assert.NoError(t, err)
     }
-    assert.Equal(t, 0, n)
+    assert.Equal(t, read, n)
+    assert.Equal(t, contents, buf[:read])
 
     err = rd.Close()
     assert.NoError(t, err)
 }
+
+func TestFileReadAtZeroLength(t *testing.T) {
+    testFileReadAt(t, 0)
+}
+
+func TestFileReadAtNonZeroLength(t *testing.T) {
+    testFileReadAt(t, 100)
+}