serve s3: pre-merge tweaks

- Changes
    - Rename `--s3-authkey` to `--auth-key` to get it out of the s3 backend namespace
    - Enable `Content-MD5` integrity checks
    - Remove locking after code audit
- Documentation
    - Factor out documentation into separate file
    - Add Quickstart to docs
    - Add Bugs section to docs
    - Add experimental tag to docs
    - Add rclone provider to s3 backend docs
- Fixes
    - Correct quirks in s3 backend
    - Change fmt.Printlns into fs.Logs
    - Make metadata storage per backend not global
    - Log on startup if anonymous access is enabled
- Coding style fixes
    - rename fs to vfs to save confusion with the rest of rclone code
    - rename db to b for *s3Backend

Fixes #7062
Nick Craig-Wood 2023-11-13 16:40:34 +00:00
parent a2c4f07a57
commit 93f35c915a
11 changed files with 831 additions and 172 deletions

View File

@@ -3134,6 +3134,13 @@ func setQuirks(opt *Options) {
// No quirks
useMultipartEtag = false // untested
useAlreadyExists = false // untested
case "Rclone":
listObjectsV2 = true
urlEncodeListings = true
virtualHostStyle = false
useMultipartEtag = false
useAlreadyExists = false
// useMultipartUploads = false - set this manually
case "Scaleway":
// Scaleway can only have 1000 parts in an upload
if opt.MaxUploadParts > 1000 {
@@ -3152,11 +3159,6 @@ func setQuirks(opt *Options) {
virtualHostStyle = false
urlEncodeListings = false
useAlreadyExists = false // untested
case "Rclone":
listObjectsV2 = true
urlEncodeListings = true
virtualHostStyle = false
useMultipartEtag = false
case "Storj":
// Force chunk size to >= 64 MiB
if opt.ChunkSize < 64*fs.Mebi {

View File

@@ -1,11 +1,10 @@
// Package s3 implements a fake s3 server for rclone
// Package s3 implements an s3 server for rclone
package s3
import (
"context"
"encoding/hex"
"io"
"log"
"os"
"path"
"strings"
@@ -18,28 +17,30 @@ import (
)
var (
emptyPrefix = &gofakes3.Prefix{}
timeFormat = "Mon, 2 Jan 2006 15:04:05.999999999 GMT"
tmpMetaStorage = new(sync.Map)
emptyPrefix = &gofakes3.Prefix{}
timeFormat = "Mon, 2 Jan 2006 15:04:05.999999999 GMT"
)
// s3Backend implements the gofakes3.Backend interface to make an S3
// backend for gofakes3
type s3Backend struct {
opt *Options
lock sync.Mutex
fs *vfs.VFS
vfs *vfs.VFS
meta *sync.Map
}
// newBackend creates a new SimpleBucketBackend.
func newBackend(fs *vfs.VFS, opt *Options) gofakes3.Backend {
func newBackend(vfs *vfs.VFS, opt *Options) gofakes3.Backend {
return &s3Backend{
fs: fs,
opt: opt,
vfs: vfs,
opt: opt,
meta: new(sync.Map),
}
}
// ListBuckets always returns the default bucket.
func (db *s3Backend) ListBuckets() ([]gofakes3.BucketInfo, error) {
dirEntries, err := getDirEntries("/", db.fs)
func (b *s3Backend) ListBuckets() ([]gofakes3.BucketInfo, error) {
dirEntries, err := getDirEntries("/", b.vfs)
if err != nil {
return nil, err
}
@@ -51,16 +52,15 @@ func (db *s3Backend) ListBuckets() ([]gofakes3.BucketInfo, error) {
CreationDate: gofakes3.NewContentTime(entry.ModTime()),
})
}
// todo: handle files in root dir
// FIXME: handle files in root dir
}
return response, nil
}
// ListBucket lists the objects in the given bucket.
func (db *s3Backend) ListBucket(bucket string, prefix *gofakes3.Prefix, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
_, err := db.fs.Stat(bucket)
func (b *s3Backend) ListBucket(bucket string, prefix *gofakes3.Prefix, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
_, err := b.vfs.Stat(bucket)
if err != nil {
return nil, gofakes3.BucketNotFound(bucket)
}
@@ -68,9 +68,6 @@ func (db *s3Backend) ListBucket(bucket string, prefix *gofakes3.Prefix, page gof
prefix = emptyPrefix
}
db.lock.Lock()
defer db.lock.Unlock()
// workaround
if strings.TrimSpace(prefix.Prefix) == "" {
prefix.HasPrefix = false
@@ -80,35 +77,31 @@ func (db *s3Backend) ListBucket(bucket string, prefix *gofakes3.Prefix, page gof
}
response := gofakes3.NewObjectList()
if db.fs.Fs().Features().BucketBased || prefix.HasDelimiter && prefix.Delimiter != "/" {
err = db.getObjectsListArbitrary(bucket, prefix, response)
if b.vfs.Fs().Features().BucketBased || prefix.HasDelimiter && prefix.Delimiter != "/" {
err = b.getObjectsListArbitrary(bucket, prefix, response)
} else {
path, remaining := prefixParser(prefix)
err = db.entryListR(bucket, path, remaining, prefix.HasDelimiter, response)
err = b.entryListR(bucket, path, remaining, prefix.HasDelimiter, response)
}
if err != nil {
return nil, err
}
return db.pager(response, page)
return b.pager(response, page)
}
// HeadObject returns the fileinfo for the given object name.
//
// Note that the metadata is not supported yet.
func (db *s3Backend) HeadObject(bucketName, objectName string) (*gofakes3.Object, error) {
_, err := db.fs.Stat(bucketName)
func (b *s3Backend) HeadObject(bucketName, objectName string) (*gofakes3.Object, error) {
_, err := b.vfs.Stat(bucketName)
if err != nil {
return nil, gofakes3.BucketNotFound(bucketName)
}
db.lock.Lock()
defer db.lock.Unlock()
fp := path.Join(bucketName, objectName)
node, err := db.fs.Stat(fp)
node, err := b.vfs.Stat(fp)
if err != nil {
return nil, gofakes3.KeyNotFound(objectName)
}
@@ -131,7 +124,7 @@ func (db *s3Backend) HeadObject(bucketName, objectName string) (*gofakes3.Object
"Content-Type": fs.MimeType(context.Background(), fobj),
}
if val, ok := tmpMetaStorage.Load(fp); ok {
if val, ok := b.meta.Load(fp); ok {
metaMap := val.(map[string]string)
for k, v := range metaMap {
meta[k] = v
@@ -148,18 +141,14 @@ func (db *s3Backend) HeadObject(bucketName, objectName string) (*gofakes3.Object
}
// GetObject fetches the object from the filesystem.
func (db *s3Backend) GetObject(bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
_, err = db.fs.Stat(bucketName)
func (b *s3Backend) GetObject(bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
_, err = b.vfs.Stat(bucketName)
if err != nil {
return nil, gofakes3.BucketNotFound(bucketName)
}
db.lock.Lock()
defer db.lock.Unlock()
fp := path.Join(bucketName, objectName)
node, err := db.fs.Stat(fp)
node, err := b.vfs.Stat(fp)
if err != nil {
return nil, gofakes3.KeyNotFound(objectName)
}
@@ -208,7 +197,7 @@ func (db *s3Backend) GetObject(bucketName, objectName string, rangeRequest *gofa
"Content-Type": fs.MimeType(context.Background(), fobj),
}
if val, ok := tmpMetaStorage.Load(fp); ok {
if val, ok := b.meta.Load(fp); ok {
metaMap := val.(map[string]string)
for k, v := range metaMap {
meta[k] = v
@@ -226,31 +215,30 @@ func (db *s3Backend) GetObject(bucketName, objectName string, rangeRequest *gofa
}
// TouchObject creates or updates meta on specified object.
func (db *s3Backend) TouchObject(fp string, meta map[string]string) (result gofakes3.PutObjectResult, err error) {
_, err = db.fs.Stat(fp)
func (b *s3Backend) TouchObject(fp string, meta map[string]string) (result gofakes3.PutObjectResult, err error) {
_, err = b.vfs.Stat(fp)
if err == vfs.ENOENT {
f, err := db.fs.Create(fp)
f, err := b.vfs.Create(fp)
if err != nil {
return result, err
}
_ = f.Close()
return db.TouchObject(fp, meta)
return b.TouchObject(fp, meta)
} else if err != nil {
return result, err
}
_, err = db.fs.Stat(fp)
_, err = b.vfs.Stat(fp)
if err != nil {
return result, err
}
tmpMetaStorage.Store(fp, meta)
b.meta.Store(fp, meta)
if val, ok := meta["X-Amz-Meta-Mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
return result, b.vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -258,7 +246,7 @@ func (db *s3Backend) TouchObject(fp string, meta map[string]string) (result gofa
if val, ok := meta["mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
return result, b.vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -267,20 +255,16 @@ func (db *s3Backend) TouchObject(fp string, meta map[string]string) (result gofa
}
// PutObject creates or overwrites the object with the given name.
func (db *s3Backend) PutObject(
func (b *s3Backend) PutObject(
bucketName, objectName string,
meta map[string]string,
input io.Reader, size int64,
) (result gofakes3.PutObjectResult, err error) {
_, err = db.fs.Stat(bucketName)
_, err = b.vfs.Stat(bucketName)
if err != nil {
return result, gofakes3.BucketNotFound(bucketName)
}
db.lock.Lock()
defer db.lock.Unlock()
fp := path.Join(bucketName, objectName)
objectDir := path.Dir(fp)
// _, err = db.fs.Stat(objectDir)
@@ -290,46 +274,45 @@ func (db *s3Backend) PutObject(
// }
if objectDir != "." {
if err := mkdirRecursive(objectDir, db.fs); err != nil {
if err := mkdirRecursive(objectDir, b.vfs); err != nil {
return result, err
}
}
if size == 0 {
// maybe a touch operation
return db.TouchObject(fp, meta)
return b.TouchObject(fp, meta)
}
f, err := db.fs.Create(fp)
f, err := b.vfs.Create(fp)
if err != nil {
return result, err
}
w := io.MultiWriter(f)
if _, err := io.Copy(w, input); err != nil {
if _, err := io.Copy(f, input); err != nil {
// remove file when i/o error occurred (FsPutErr)
_ = f.Close()
_ = db.fs.Remove(fp)
_ = b.vfs.Remove(fp)
return result, err
}
if err := f.Close(); err != nil {
// remove file when close error occurred (FsPutErr)
_ = db.fs.Remove(fp)
_ = b.vfs.Remove(fp)
return result, err
}
_, err = db.fs.Stat(fp)
_, err = b.vfs.Stat(fp)
if err != nil {
return result, err
}
tmpMetaStorage.Store(fp, meta)
b.meta.Store(fp, meta)
if val, ok := meta["X-Amz-Meta-Mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
return result, b.vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -337,7 +320,7 @@ func (db *s3Backend) PutObject(
if val, ok := meta["mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
return result, db.fs.Chtimes(fp, ti, ti)
return result, b.vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -346,13 +329,10 @@ func (db *s3Backend) PutObject(
}
// DeleteMulti deletes multiple objects in a single request.
func (db *s3Backend) DeleteMulti(bucketName string, objects ...string) (result gofakes3.MultiDeleteResult, rerr error) {
db.lock.Lock()
defer db.lock.Unlock()
func (b *s3Backend) DeleteMulti(bucketName string, objects ...string) (result gofakes3.MultiDeleteResult, rerr error) {
for _, object := range objects {
if err := db.deleteObjectLocked(bucketName, object); err != nil {
log.Println("delete object failed:", err)
if err := b.deleteObject(bucketName, object); err != nil {
fs.Errorf("serve s3", "delete object failed: %v", err)
result.Error = append(result.Error, gofakes3.ErrorResult{
Code: gofakes3.ErrInternal,
Message: gofakes3.ErrInternal.Message(),
@@ -369,17 +349,13 @@ func (db *s3Backend) DeleteMulti(bucketName string, objects ...string) (result g
}
// DeleteObject deletes the object with the given name.
func (db *s3Backend) DeleteObject(bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
db.lock.Lock()
defer db.lock.Unlock()
return result, db.deleteObjectLocked(bucketName, objectName)
func (b *s3Backend) DeleteObject(bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
return result, b.deleteObject(bucketName, objectName)
}
// deleteObjectLocked deletes the object from the filesystem.
func (db *s3Backend) deleteObjectLocked(bucketName, objectName string) error {
_, err := db.fs.Stat(bucketName)
// deleteObject deletes the object from the filesystem.
func (b *s3Backend) deleteObject(bucketName, objectName string) error {
_, err := b.vfs.Stat(bucketName)
if err != nil {
return gofakes3.BucketNotFound(bucketName)
}
@@ -387,20 +363,20 @@ func (db *s3Backend) deleteObjectLocked(bucketName, objectName string) error {
fp := path.Join(bucketName, objectName)
// S3 does not report an error when attempting to delete a key that does not exist, so
// we need to skip IsNotExist errors.
if err := db.fs.Remove(fp); err != nil && !os.IsNotExist(err) {
if err := b.vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
return err
}
// fixme: unsafe operation
if db.fs.Fs().Features().CanHaveEmptyDirectories {
rmdirRecursive(fp, db.fs)
// FIXME: unsafe operation
if b.vfs.Fs().Features().CanHaveEmptyDirectories {
rmdirRecursive(fp, b.vfs)
}
return nil
}
// CreateBucket creates a new bucket.
func (db *s3Backend) CreateBucket(name string) error {
_, err := db.fs.Stat(name)
func (b *s3Backend) CreateBucket(name string) error {
_, err := b.vfs.Stat(name)
if err != nil && err != vfs.ENOENT {
return gofakes3.ErrInternal
}
@@ -409,20 +385,20 @@ func (db *s3Backend) CreateBucket(name string) error {
return gofakes3.ErrBucketAlreadyExists
}
if err := db.fs.Mkdir(name, 0755); err != nil {
if err := b.vfs.Mkdir(name, 0755); err != nil {
return gofakes3.ErrInternal
}
return nil
}
// DeleteBucket deletes the bucket with the given name.
func (db *s3Backend) DeleteBucket(name string) error {
_, err := db.fs.Stat(name)
func (b *s3Backend) DeleteBucket(name string) error {
_, err := b.vfs.Stat(name)
if err != nil {
return gofakes3.BucketNotFound(name)
}
if err := db.fs.Remove(name); err != nil {
if err := b.vfs.Remove(name); err != nil {
return gofakes3.ErrBucketNotEmpty
}
@@ -430,8 +406,8 @@ func (db *s3Backend) DeleteBucket(name string) error {
}
// BucketExists checks if the bucket exists.
func (db *s3Backend) BucketExists(name string) (exists bool, err error) {
_, err = db.fs.Stat(name)
func (b *s3Backend) BucketExists(name string) (exists bool, err error) {
_, err = b.vfs.Stat(name)
if err != nil {
return false, nil
}
@@ -440,11 +416,10 @@ func (db *s3Backend) BucketExists(name string) (exists bool, err error) {
}
// CopyObject copies the specified object from srcKey to dstKey.
func (db *s3Backend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
func (b *s3Backend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
fp := path.Join(srcBucket, srcKey)
if srcBucket == dstBucket && srcKey == dstKey {
tmpMetaStorage.Store(fp, meta)
b.meta.Store(fp, meta)
val, ok := meta["X-Amz-Meta-Mtime"]
if !ok {
@@ -458,15 +433,15 @@ func (db *s3Backend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, met
return result, nil
}
return result, db.fs.Chtimes(fp, ti, ti)
return result, b.vfs.Chtimes(fp, ti, ti)
}
cStat, err := db.fs.Stat(fp)
cStat, err := b.vfs.Stat(fp)
if err != nil {
return
}
c, err := db.GetObject(srcBucket, srcKey, nil)
c, err := b.GetObject(srcBucket, srcKey, nil)
if err != nil {
return
}
@@ -483,7 +458,7 @@ func (db *s3Backend) CopyObject(srcBucket, srcKey, dstBucket, dstKey string, met
meta["mtime"] = swift.TimeToFloatString(cStat.ModTime())
}
_, err = db.PutObject(dstBucket, dstKey, meta, c.Contents, c.Size)
_, err = b.PutObject(dstBucket, dstKey, meta, c.Contents, c.Size)
if err != nil {
return
}

View File

@@ -1,45 +0,0 @@
package s3
var longHelp = `
Serve s3 implements a basic s3 server that serves a remote
via s3. This can be viewed with an s3 client, or you can make
an s3 type remote to read and write to it.
S3 server supports Signature Version 4 authentication. Just
use ` + `--s3-authkey accessKey1,secretKey1` + ` and
set Authorization Header correctly in the request. (See
https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
Please note that some clients may require HTTPS endpoints.
See [#SSL](#ssl-tls) for SSL configuration.
Use ` + `--force-path-style=false` + ` if you want to use bucket name as a part of
hostname (such as mybucket.local)
Use ` + `--etag-hash` + ` if you want to change hash provider.
Limitations
serve s3 will treat all depth=1 directories in root as buckets and
ignore files in that depth. You might use CreateBucket to create
folders under root, but you can't create empty folder under other folders.
When using PutObject or DeleteObject, rclone will automatically create
or clean up empty folders by the prefix. If you don't want to clean up
empty folders automatically, use ` + `--no-cleanup` + `.
When using ListObjects, rclone will use ` + `/` + ` when the delimiter is empty.
This reduces backend requests with no effect on most operations, but if
the delimiter is something other than slash and nil, rclone will do a
full recursive search to the backend, which may take some time.
serve s3 currently supports the following operations.
Bucket-level operations
ListBuckets, CreateBucket, DeleteBucket
Object-level operations
HeadObject, ListObjects, GetObject, PutObject, DeleteObject, DeleteObjects,
CreateMultipartUpload, CompleteMultipartUpload, AbortMultipartUpload,
CopyObject, UploadPart
Other operations will encounter error Unimplemented.
`

View File

@@ -10,10 +10,10 @@ import (
"github.com/rclone/rclone/fs/walk"
)
func (db *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix bool, response *gofakes3.ObjectList) error {
func (b *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix bool, response *gofakes3.ObjectList) error {
fp := path.Join(bucket, fdPath)
dirEntries, err := getDirEntries(fp, db.fs)
dirEntries, err := getDirEntries(fp, b.vfs)
if err != nil {
return err
}
@@ -33,7 +33,7 @@ func (db *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix boo
response.AddPrefix(gofakes3.URLEncode(objectPath))
continue
}
err := db.entryListR(bucket, path.Join(fdPath, object), "", false, response)
err := b.entryListR(bucket, path.Join(fdPath, object), "", false, response)
if err != nil {
return err
}
@@ -52,10 +52,9 @@ func (db *s3Backend) entryListR(bucket, fdPath, name string, acceptComPrefix boo
}
// getObjectsList lists the objects in the given bucket.
func (db *s3Backend) getObjectsListArbitrary(bucket string, prefix *gofakes3.Prefix, response *gofakes3.ObjectList) error {
func (b *s3Backend) getObjectsListArbitrary(bucket string, prefix *gofakes3.Prefix, response *gofakes3.ObjectList) error {
// ignore error - vfs may have uncommitted updates, such as new dir etc.
_ = walk.ListR(context.Background(), db.fs.Fs(), bucket, false, -1, walk.ListObjects, func(entries fs.DirEntries) error {
_ = walk.ListR(context.Background(), b.vfs.Fs(), bucket, false, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, entry := range entries {
entry := entry.(fs.Object)
objName := entry.Remote()

View File

@@ -2,7 +2,7 @@ package s3
import (
"context"
"strings"
_ "embed"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
@@ -33,15 +33,23 @@ func init() {
vfsflags.AddFlags(flagSet)
flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access if false use virtual hosted style (default true)", "")
flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off", "")
flags.StringArrayVarP(flagSet, &Opt.authPair, "s3-authkey", "", Opt.authPair, "Set key pair for v4 authorization, split by comma", "")
flags.StringArrayVarP(flagSet, &Opt.authPair, "auth-key", "", Opt.authPair, "Set key pair for v4 authorization: access_key_id,secret_access_key", "")
flags.BoolVarP(flagSet, &Opt.noCleanup, "no-cleanup", "", Opt.noCleanup, "Not to cleanup empty folder after object is deleted", "")
}
//go:embed serve_s3.md
var serveS3Help string
// Command definition for cobra
var Command = &cobra.Command{
Annotations: map[string]string{
"versionIntroduced": "v1.65",
"groups": "Filter",
"status": "Experimental",
},
Use: "s3 remote:path",
Short: `Serve remote:path over s3.`,
Long: strings.ReplaceAll(longHelp, "|", "`") + httplib.Help(flagPrefix) + vfs.Help,
Long: serveS3Help + httplib.Help(flagPrefix) + vfs.Help,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)

View File

@@ -69,7 +69,6 @@ func TestS3(t *testing.T) {
"type": "s3",
"provider": "Rclone",
"endpoint": testURL,
"list_url_encode": "true",
"access_key_id": keyid,
"secret_access_key": keysec,
}

cmd/serve/s3/serve_s3.md (new file, 104 lines)
View File

@@ -0,0 +1,104 @@
`serve s3` implements a basic s3 server that serves a remote via s3.
This can be viewed with an s3 client, or you can make an [s3 type
remote](/s3/) to read and write to it with rclone.
`serve s3` is considered **Experimental** so use with care.
S3 server supports Signature Version 4 authentication. Just use
`--auth-key accessKey,secretKey` and set the `Authorization`
header correctly in the request. (See the [AWS
docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
`--auth-key` can be repeated for multiple auth pairs. If
`--auth-key` is not provided then `serve s3` will allow anonymous
access.
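With anonymous access enabled, an S3 client still needs a credentials provider that signs nothing. A minimal fragment using the AWS SDK for Go v2 is shown here as a sketch; the imports and a complete program appear in the Quickstart below, and the region value is an assumption since `serve s3` is not believed to check it:
```
// Anonymous client for a server started without --auth-key.
client := s3.New(s3.Options{
	Region:       "us-east-1", // assumption: any region works
	Credentials:  aws.AnonymousCredentials{},
	BaseEndpoint: aws.String("http://127.0.0.1:8080"),
	UsePathStyle: true, // matches the default --force-path-style=true
})
```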
Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#ssl-tls) for more information.
This command uses the [VFS directory cache](#vfs-virtual-file-system).
All the functionality will work with `--vfs-cache-mode off`.
`--vfs-cache-mode full` (or `writes`) can be used to cache objects
locally to improve performance.
Use `--force-path-style=false` if you want to use the bucket name as a
part of the hostname (such as mybucket.local).
Use `--etag-hash` if you want to change the hash used for the `ETag`.
Note that using anything other than `MD5` (the default) is likely to
cause problems for S3 clients which rely on the `ETag` being the MD5.
### Quickstart
For a simple setup, to serve `remote:path` over s3, run the server
like this:
```
rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
```
This will be compatible with an rclone remote which is defined like this:
```
[serves3]
type = s3
provider = Rclone
endpoint = http://127.0.0.1:8080/
access_key_id = ACCESS_KEY_ID
secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```
Note that setting `use_multipart_uploads = false` is to work around
[a bug](#bugs) which will be fixed in due course.
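Clients other than rclone work too. As an illustration, here is a minimal sketch using the AWS SDK for Go v2; the endpoint and keys match the example above, while the region is an arbitrary assumption:
```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	client := s3.New(s3.Options{
		// Assumption: serve s3 does not check the region.
		Region:      "us-east-1",
		Credentials: credentials.NewStaticCredentialsProvider("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", ""),
		// Endpoint from the Quickstart example above.
		BaseEndpoint: aws.String("http://127.0.0.1:8080"),
		// Matches the serve s3 default of --force-path-style=true.
		UsePathStyle: true,
	})
	out, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, bucket := range out.Buckets {
		// Each top-level directory in remote:path appears as a bucket.
		fmt.Println(*bucket.Name)
	}
}
```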
### Bugs
When uploading multipart files `serve s3` holds all the parts in
memory. This is a limitation of the library rclone uses for serving S3
and will hopefully be fixed at some point.
### Limitations
`serve s3` will treat all directories in the root as buckets and
ignore all files in the root. You can use `CreateBucket` to create
folders under the root, but you can't create empty folders nested
under other folders.
When using `PutObject` or `DeleteObject`, rclone will automatically
create or clean up empty folders. If you don't want to clean up empty
folders automatically, use `--no-cleanup`.
When using `ListObjects`, rclone will use `/` when the delimiter is
empty. This reduces backend requests with no effect on most
operations, but if the delimiter is neither `/` nor empty, rclone
will do a full recursive search of the backend, which
can take some time.
Versioning is not currently supported.
Apart from the rclone `mtime` metadata, which is applied as the
modification time of the file, metadata is only held in memory.
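For example, to set the modification time from an S3 client, here is a hedged sketch building on the client above (assumptions: the `strings` and `time` imports are added, and the value is Unix seconds as a decimal string, which the SDK sends as the `X-Amz-Meta-Mtime` header that `serve s3` parses):
```
// putWithMtime uploads data and asks serve s3 to apply mtime to the
// underlying file. Sketch only: "mybucket" is a hypothetical existing
// top-level directory (bucket).
func putWithMtime(ctx context.Context, client *s3.Client, mtime time.Time) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:   aws.String("mybucket"),
		Key:      aws.String("hello.txt"),
		Body:     strings.NewReader("hello world"),
		// Assumption: Unix seconds as a decimal string, optionally fractional.
		Metadata: map[string]string{"mtime": fmt.Sprintf("%d", mtime.Unix())},
	})
	return err
}
```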
### Supported operations
`serve s3` currently supports the following operations.
- Bucket
- `ListBuckets`
- `CreateBucket`
- `DeleteBucket`
- Object
- `HeadObject`
- `ListObjects`
- `GetObject`
- `PutObject`
- `DeleteObject`
- `DeleteObjects`
- `CreateMultipartUpload`
- `CompleteMultipartUpload`
- `AbortMultipartUpload`
- `CopyObject`
- `UploadPart`
Other operations will return error `Unimplemented`.

View File

@@ -45,6 +45,10 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
vfs: vfs.New(f, &vfsflags.Opt),
}
if len(opt.authPair) == 0 {
fs.Logf("serve s3", "No auth provided so allowing anonymous access")
}
var newLogger logger
w.faker = gofakes3.New(
newBackend(w.vfs, opt),
@@ -53,6 +57,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
gofakes3.WithRequestID(rand.Uint64()),
gofakes3.WithoutVersioning(),
gofakes3.WithV4Auth(authlistResolver(opt.authPair)),
gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
)
w.Server, err = httplib.NewServer(ctx,

View File

@@ -15,8 +15,8 @@ import (
"github.com/rclone/rclone/vfs"
)
func getDirEntries(prefix string, fs *vfs.VFS) (vfs.Nodes, error) {
node, err := fs.Stat(prefix)
func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) {
node, err := VFS.Stat(prefix)
if err == vfs.ENOENT {
return nil, gofakes3.ErrNoSuchKey
@@ -83,7 +83,6 @@ func getFileHash(node interface{}) string {
}
func prefixParser(p *gofakes3.Prefix) (path, remaining string) {
idx := strings.LastIndexByte(p.Prefix, '/')
if idx < 0 {
return "", p.Prefix
@@ -91,14 +90,15 @@ func prefixParser(p *gofakes3.Prefix) (path, remaining string) {
return p.Prefix[:idx], p.Prefix[idx+1:]
}
func mkdirRecursive(path string, fs *vfs.VFS) error {
// FIXME this could be implemented by VFS.MkdirAll()
func mkdirRecursive(path string, VFS *vfs.VFS) error {
path = strings.Trim(path, "/")
dirs := strings.Split(path, "/")
dir := ""
for _, d := range dirs {
dir += "/" + d
if _, err := fs.Stat(dir); err != nil {
err := fs.Mkdir(dir, 0777)
if _, err := VFS.Stat(dir); err != nil {
err := VFS.Mkdir(dir, 0777)
if err != nil {
return err
}
@@ -107,30 +107,30 @@ func mkdirRecursive(path string, fs *vfs.VFS) error {
return nil
}
func rmdirRecursive(p string, fs *vfs.VFS) {
func rmdirRecursive(p string, VFS *vfs.VFS) {
dir := path.Dir(p)
if !strings.ContainsAny(dir, "/\\") {
// might be bucket(root)
return
}
if _, err := fs.Stat(dir); err == nil {
err := fs.Remove(dir)
if _, err := VFS.Stat(dir); err == nil {
err := VFS.Remove(dir)
if err != nil {
return
}
rmdirRecursive(dir, fs)
rmdirRecursive(dir, VFS)
}
}
func authlistResolver(list []string) map[string]string {
authList := make(map[string]string)
for _, v := range list {
splited := strings.Split(v, ",")
if len(splited) != 2 {
parts := strings.Split(v, ",")
if len(parts) != 2 {
fs.Infof(nil, fmt.Sprintf("Ignored: invalid auth pair %s", v))
continue
}
authList[splited[0]] = splited[1]
authList[parts[0]] = parts[1]
}
return authList
}

View File

@@ -0,0 +1,586 @@
---
title: "rclone serve s3"
description: "Serve remote:path over s3."
slug: rclone_serve_s3
url: /commands/rclone_serve_s3/
groups: Filter
status: Experimental
versionIntroduced: v1.65
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/s3/ and as part of making a release run "make commanddocs"
---
# rclone serve s3
Serve remote:path over s3.
## Synopsis
`serve s3` implements a basic s3 server that serves a remote via s3.
This can be viewed with an s3 client, or you can make an [s3 type
remote](/s3/) to read and write to it with rclone.
`serve s3` is considered **Experimental** so use with care.
S3 server supports Signature Version 4 authentication. Just use
`--auth-key accessKey,secretKey` and set the `Authorization`
header correctly in the request. (See the [AWS
docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
`--auth-key` can be repeated for multiple auth pairs. If
`--auth-key` is not provided then `serve s3` will allow anonymous
access.
Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#ssl-tls) for more information.
This command uses the [VFS directory cache](#vfs-virtual-file-system).
All the functionality will work with `--vfs-cache-mode off`.
`--vfs-cache-mode full` (or `writes`) can be used to cache objects
locally to improve performance.
Use `--force-path-style=false` if you want to use the bucket name as a
part of the hostname (such as mybucket.local).
Use `--etag-hash` if you want to change the hash used for the `ETag`.
Note that using anything other than `MD5` (the default) is likely to
cause problems for S3 clients which rely on the `ETag` being the MD5.
## Quickstart
For a simple setup, to serve `remote:path` over s3, run the server
like this:
```
rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
```
This will be compatible with an rclone remote which is defined like this:
```
[serves3]
type = s3
provider = Rclone
endpoint = http://127.0.0.1:8080/
access_key_id = ACCESS_KEY_ID
secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```
Note that setting `use_multipart_uploads = false` is to work around
[a bug](#bugs) which will be fixed in due course.
## Bugs
When uploading multipart files `serve s3` holds all the parts in
memory. This is a limitation of the library rclone uses for serving S3
and will hopefully be fixed at some point.
## Limitations
`serve s3` will treat all directories in the root as buckets and
ignore all files in the root. You can use `CreateBucket` to create
folders under the root, but you can't create empty folders nested
under other folders.
When using `PutObject` or `DeleteObject`, rclone will automatically
create or clean up empty folders. If you don't want to clean up empty
folders automatically, use `--no-cleanup`.
When using `ListObjects`, rclone will use `/` when the delimiter is
empty. This reduces backend requests with no effect on most
operations, but if the delimiter is neither `/` nor empty, rclone
will do a full recursive search of the backend, which
can take some time.
Versioning is not currently supported.
Apart from the rclone `mtime` metadata, which is applied as the
modification time of the file, metadata is only held in memory.
## Supported operations
`serve s3` currently supports the following operations.
- Bucket
- `ListBuckets`
- `CreateBucket`
- `DeleteBucket`
- Object
- `HeadObject`
- `ListObjects`
- `GetObject`
- `PutObject`
- `DeleteObject`
- `DeleteObjects`
- `CreateMultipartUpload`
- `CompleteMultipartUpload`
- `AbortMultipartUpload`
- `CopyObject`
- `UploadPart`
Other operations will return error `Unimplemented`.
## Server options
Use `--addr` to specify which IP address and port the server should
listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
If you set `--addr` to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
You can use a unix socket by setting the url to `unix:///path/to/socket`
or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system permissions.
`--addr` may be repeated to listen on multiple IPs/ports/sockets.
`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
for a transfer.
`--max-header-bytes` controls the maximum number of bytes the server will
accept in the HTTP header.
`--baseurl` controls the URL prefix that rclone serves from. By default
rclone will serve from the root. If you used `--baseurl "/rclone"` then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve. Rclone automatically
inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`,
`--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated
identically.
### TLS (SSL)
By default this will serve over http. If you want you can serve over
https. You will need to supply the `--cert` and `--key` flags.
If you wish to do client side certificate validation then you will need to
supply `--client-ca` also.
`--cert` should be either a PEM encoded certificate or a concatenation
of that with the CA certificate. `--key` should be the PEM encoded
private key and `--client-ca` should be the PEM encoded client
certificate authority certificate.
`--min-tls-version` is the minimum TLS version that is acceptable. Valid
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").
## VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects
that rclone uses into something which looks much more like a disk
filing system.
Cloud storage objects have lots of properties which aren't like disk
files - you can't extend them or write to the middle of them, so the
VFS layer has to deal with that. Because there is no one right way of
doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info
about files and directories (but not the data) in memory.
## VFS Directory Cache
Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
backend. Changes made through the VFS will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
the directory cache expires if the backend configured does not support
polling for changes. If the backend supports polling, changes will be
picked up within the polling interval.
You can send a `SIGHUP` signal to rclone for it to flush all
directory caches, regardless of how old they are. Assuming only one
rclone instance is running, you can reset the cache like this:
kill -SIGHUP $(pidof rclone)
If you configure rclone with a [remote control](/rc) then you can use
rclone rc to flush the whole directory cache:
rclone rc vfs/forget
Or individual files or directories:
rclone rc vfs/forget file=path/to/file dir=path/to/dir
## VFS File Buffering
The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.
Each open file will try to keep the specified amount of data in memory
at all times. The buffered data is bound to one open file and won't be
shared.
This flag is an upper limit for the memory used per open file. The
buffer will only use memory for data that is downloaded but not
yet read. If the buffer is empty, only a small amount of memory will
be used.
The maximum memory used by rclone for buffering can be up to
`--buffer-size * open files`.
## VFS File Caching
These flags control the VFS file caching options. File caching is
necessary to make the VFS layer appear compatible with a normal file
system. It can be disabled at the cost of some compatibility.
For example you'll need to enable VFS caching if you want to read and
write simultaneously to a file. See below for more details.
Note that the VFS cache is separate from the cache backend and you may
find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but
can be controlled with `--cache-dir` or setting the appropriate
environment variable.
The cache has 4 different modes selected by `--vfs-cache-mode`.
The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.
Note that files are written back to the remote only when they are
closed and if they haven't been accessed for `--vfs-write-back`
seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
that the cache may exceed these quotas for two reasons. Firstly
because it is only checked every `--vfs-cache-poll-interval`. Secondly
because open files cannot be evicted from the cache. When
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
rclone will attempt to evict the least accessed files from the cache
first. rclone will start with files that haven't been accessed for the
longest. This cache flushing strategy is efficient and more relevant
files are likely to remain cached.
The `--vfs-cache-max-age` will evict files from the cache
after the set time since last access has passed. The default value of
1 hour will start evicting files from cache that haven't been accessed
for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0
and will wait for 1 more hour before evicting. Specify the time with
standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.
This will mean some operations are not possible
* Files can't be opened for both read AND write
* Files opened for write can't be seeked
* Existing files opened for write must have O_TRUNC set
* Files open for read with O_TRUNC will be opened write only
* Files open for write only will behave as if O_TRUNC was supplied
* Open modes O_APPEND, O_TRUNC are ignored
* If an upload fails it can't be retried
### --vfs-cache-mode minimal
This is very similar to "off" except that files opened for read AND
write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.
These operations are not possible
* Files opened for write only can't be seeked
* Existing files opened for write must have O_TRUNC set
* Files opened for write only will ignore O_APPEND, O_TRUNC
* If an upload fails it can't be retried
### --vfs-cache-mode writes
In this mode files opened for read only are still read directly from
the remote, write only and read/write files are buffered to disk
first.
This mode should support all normal file system operations.
If an upload fails it will be retried at exponentially increasing
intervals up to 1 minute.
### --vfs-cache-mode full
In this mode all reads and writes are buffered to and from disk. When
data is read from the remote this is buffered to disk as well.
In this mode the files in the cache will be sparse files and rclone
will keep track of which bits of the files it has downloaded.
So if an application only reads the starts of each file, then rclone
will only buffer the start of the file. These files will appear to be
their full size in the cache, but they will be sparse files with only
the data that has been downloaded present in them.
This mode should support all normal file system operations and is
otherwise identical to `--vfs-cache-mode writes`.
When reading a file rclone will read `--buffer-size` plus
`--vfs-read-ahead` bytes ahead. The `--buffer-size` is buffered in memory
whereas the `--vfs-read-ahead` is buffered on disk.
When using this mode it is recommended that `--buffer-size` is not set
too large and `--vfs-read-ahead` is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
### Fingerprinting
Various parts of the VFS use fingerprinting to see if a local file
copy has changed relative to a remote file. Fingerprints are made
from:
- size
- modification time
- hash
where available on an object.
On some backends some of these attributes are slow to read (they take
an extra API call per object, or extra work per object).
For example `hash` is slow with the `local` and `sftp` backends as
they have to read the entire file and hash it, and `modtime` is slow
with the `s3`, `swift`, `ftp` and `qingstor` backends because they
need to do an extra API call to fetch it.
If you use the `--vfs-fast-fingerprint` flag then rclone will not
include the slow operations in the fingerprint. This makes the
fingerprinting less accurate but much faster and will improve the
opening time of cached files.
If you are running a vfs cache over `local`, `s3` or `swift` backends
then using this flag is recommended.
Note that if you change the value of this flag, the fingerprints of
the files in the cache may be invalidated and the files will need to
be downloaded again.
## VFS Chunked Reading
When rclone reads files from a remote it reads them in chunks. This
means that rather than requesting the whole file rclone reads the
chunk specified. This can reduce the used download quota for some
remotes by requesting only chunks from the remote that are actually
read, at the cost of an increased number of requests.
These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
specified, and greater than `--vfs-read-chunk-size`, the chunk size for each
open file will get doubled only until the specified value is reached. If the
value is "off", which is the default, the limit is disabled and the chunk size
will grow indefinitely.
With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0`
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
## VFS Performance
These flags may be used to enable/disable features of the VFS for
performance or other reasons. See also the [chunked reading](#vfs-chunked-reading)
feature.
In particular S3 and Swift benefit hugely from the `--no-modtime` flag
(or use `--use-server-modtime` for a slightly different effect) as each
read of the modification time takes a transaction.
--no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up).
--no-seek Don't allow seeking in files.
--read-only Only allow read-only access.
Sometimes rclone is delivered reads or writes out of order. Rather
than seeking rclone will wait a short time for the in sequence read or
write to come in. These flags only come into effect when not using an
on disk cache file.
--vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
When using VFS write caching (`--vfs-cache-mode` with value writes or full),
the global flag `--transfers` can be set to adjust the number of parallel uploads of
modified files from the cache (the related global flag `--checkers` has no effect on the VFS).
--transfers int Number of file transfers to run in parallel (default 4)
## VFS Case Sensitivity
Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.
It is not allowed for two files in the same directory to differ only by case.
Usually file systems on macOS are case-insensitive. It is possible to make macOS
file systems case-sensitive but that is not the default.
The `--vfs-case-insensitive` VFS flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the remote
as-is. If the flag is "true" (or appears without a value on the
command line), rclone may perform a "fixup" as explained below.
The user may specify a file name to open/delete/rename/etc with a case
different than what is stored on the remote. If an argument refers
to an existing file with exactly the same name, then the case of the existing
file on the disk will be used. However, if a file name with exactly the same
name is not found but a name differing only by case exists, rclone will
transparently fixup the name. This fixup happens only when an existing file
is requested. Case sensitivity of file names created anew by rclone is
controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system presented by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
```
rclone serve s3 remote:path [flags]
```
## Options
```
--addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
--allow-origin string Origin which cross-domain request (CORS) can be executed from
--auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key
--baseurl string Prefix for URLs - leave blank for root
--cert string TLS PEM key (concatenation of certificate and CA certificate)
--client-ca string Client certificate authority to verify clients with
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
--etag-hash string Which hash to use for the ETag, or auto or blank for off (default "MD5")
--file-perms FileMode File permissions (default 0666)
--force-path-style If true use path style access if false use virtual hosted style (default true)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for s3
--key string TLS PEM Private key
--max-header-bytes int Maximum size of request header (default 4096)
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
--no-checksum Don't compare checksums on up/download
--no-cleanup Not to cleanup empty folder after object is deleted
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
--poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
--read-only Only allow read-only access
--server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
--server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
--vfs-refresh Refreshes the directory cache recursively on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
```
## Filter Options
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
--exclude-if-present stringArray Exclude directories if filename is present
--files-from stringArray Read list of source-file names from file (use - to read from stdin)
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-depth int If set limits the recursion depth to this (default -1)
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
--metadata-exclude stringArray Exclude metadatas matching pattern
--metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
--metadata-filter stringArray Add a metadata filtering rule
--metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
--metadata-include stringArray Include metadatas matching pattern
--metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.

View File

@@ -29,6 +29,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
@@ -4590,6 +4591,31 @@ endpoint = s3.rackcorp.com
location_constraint = au-nsw
```
### Rclone Serve S3 {#rclone}
Rclone can serve any remote over the S3 protocol. For details see the
[rclone serve s3](/commands/rclone_serve_s3/) documentation.
For example, to serve `remote:path` over s3, run the server like this:
```
rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
```
This will be compatible with an rclone remote which is defined like this:
```
[serves3]
type = s3
provider = Rclone
endpoint = http://127.0.0.1:8080/
access_key_id = ACCESS_KEY_ID
secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```
Note that setting `use_multipart_uploads = false` is to work around
[a bug](/commands/rclone_serve_s3/#bugs) which will be fixed in due course.
### Scaleway