// Package mega provides an interface to the Mega
// object storage system.
package mega

/*
Open questions
* Does mega support a content hash - what exactly are the mega hashes?
* Can mega support setting modification times?

Improvements:
* Uploads could be done in parallel
* Downloads would be more efficient done in one go
* Uploads would be more efficient with bigger chunks
* Looks like mega can support server-side copy, but it isn't implemented in go-mega
* Upload can set modtime... - set as int64_t - can set ctime and mtime?
*/

import (
	"context"
	"fmt"
	"io"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"

	mega "github.com/t3rm1n4l/go-mega"
)

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	eventWaitTime = 500 * time.Millisecond
	decayConstant = 2 // bigger for slower decay, exponential
)

var (
	megaCacheMu sync.Mutex                // mutex for the below
	megaCache   = map[string]*mega.Mega{} // cache logged in Mega's by user
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mega",
		Description: "Mega",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "user",
			Help:     "User name",
			Required: true,
		}, {
			Name:       "pass",
			Help:       "Password.",
			Required:   true,
			IsPassword: true,
		}, {
			Name: "debug",
			Help: `Output more debug from Mega.

If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "hard_delete",
			Help: `Delete files permanently rather than putting them into the trash.

Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeInvalidUtf8),
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	User       string               `config:"user"`
	Pass       string               `config:"pass"`
	Debug      bool                 `config:"debug"`
	HardDelete bool                 `config:"hard_delete"`
	Enc        encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote mega
type Fs struct {
	name       string       // name of this remote
	root       string       // the path we are working on
	opt        Options      // parsed config options
	features   *fs.Features // optional features
	srv        *mega.Mega   // the connection to the server
	pacer      *fs.Pacer    // pacer for API calls
	rootNodeMu sync.Mutex   // mutex for _rootNode
	_rootNode  *mega.Node   // root node - call findRoot to use this
	mkdirMu    sync.Mutex   // used to serialize calls to mkdir / rmdir
}

// Object describes a mega object
//
// Will definitely have info but maybe not meta
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
// In this case we just store a pointer to the object.
type Object struct {
	fs     *Fs        // what this object is part of
	remote string     // The remote path
	info   *mega.Node // pointer to the mega node
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("mega root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a mega 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// Let the mega library handle the low level retries
	return false, err
	/*
		switch errors.Cause(err) {
		case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
			return true, err
		}
		return fserrors.ShouldRetry(err), err
	*/
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
	rootNode, err := f.findRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	return f.findObject(rootNode, remote)
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
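	// The password is stored obscured in the rclone config, so reveal the plaintext before using it to log in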
	if opt.Pass != "" {
		var err error
		opt.Pass, err = obscure.Reveal(opt.Pass)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't decrypt password")
		}
	}
	ci := fs.GetConfig(ctx)

	// cache *mega.Mega on username so we can re-use and share
	// them between remotes. They are expensive to make as they
	// contain all the objects and sharing the objects makes the
	// move code easier as we don't have to worry about mixing
	// them up between different remotes.
	megaCacheMu.Lock()
	defer megaCacheMu.Unlock()
	srv := megaCache[opt.User]
	if srv == nil {
		srv = mega.New().SetClient(fshttp.NewClient(ctx))
		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
		srv.SetLogger(func(format string, v ...interface{}) {
			fs.Infof("*go-mega*", format, v...)
		})
		if opt.Debug {
			srv.SetDebugger(func(format string, v ...interface{}) {
				fs.Debugf("*go-mega*", format, v...)
			})
		}

		err := srv.Login(opt.User, opt.Pass)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't login")
		}
		megaCache[opt.User] = srv
	}

	root = parsePath(root)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   srv,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	// Find the root node and check if it is a file or not
	_, err = f.findRoot(ctx, false)
	switch err {
	case nil:
		// root node found and is a directory
	case fs.ErrorDirNotFound:
		// root node not found, so can't be a file
	case fs.ErrorIsFile:
		// root node is a file so point to parent directory
		root = path.Dir(root)
		if root == "." {
			root = ""
		}
		f.root = root
		return f, err
	}
	return f, nil
}

// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend specific encoding
func (f *Fs) splitNodePath(nodePath string) (parts []string) {
	nodePath = path.Clean(nodePath)
	if nodePath == "." || nodePath == "/" {
		return nil
	}
	nodePath = f.opt.Enc.FromStandardPath(nodePath)
	return strings.Split(nodePath, "/")
}

// findNode looks up the node for the path of the name given from the root given
//
// It returns mega.ENOENT if it wasn't found
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
	parts := f.splitNodePath(nodePath)
	if parts == nil {
		return rootNode, nil
	}
	nodes, err := f.srv.FS.PathLookup(rootNode, parts)
	if err != nil {
		return nil, err
	}
	return nodes[len(nodes)-1], nil
}

// findDir finds the directory rooted from the node passed in
func (f *Fs) findDir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
	node, err = f.findNode(rootNode, dir)
	if err == mega.ENOENT {
		return nil, fs.ErrorDirNotFound
	} else if err == nil && node.GetType() == mega.FILE {
		return nil, fs.ErrorIsFile
	}
	return node, err
}

// findObject looks up the node for the object of the name given
func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err error) {
	node, err = f.findNode(rootNode, file)
	if err == mega.ENOENT {
		return nil, fs.ErrorObjectNotFound
	} else if err == nil && node.GetType() != mega.FILE {
		return nil, fs.ErrorNotAFile
	}
	return node, err
}

// lookupDir looks up the node for the directory of the name given
//
// It returns fs.ErrorDirNotFound if the directory isn't found
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
	rootNode, err := f.findRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	return f.findDir(rootNode, dir)
}

// lookupParentDir finds the parent node for the remote passed in
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
	parent, leaf := path.Split(remote)
	dirNode, err = f.lookupDir(ctx, parent)
	return dirNode, leaf, err
}

// mkdir makes the directory and any parent directories for the
// directory of the name given
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	parts := f.splitNodePath(dir)
	if parts == nil {
		return rootNode, nil
	}
	var i int
	// look up until we find a directory which exists
	for i = 0; i <= len(parts); i++ {
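		// try the path with the last i components removed, so each pass looks one level higher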
		var nodes []*mega.Node
		nodes, err = f.srv.FS.PathLookup(rootNode, parts[:len(parts)-i])
		if err == nil {
			if len(nodes) == 0 {
				node = rootNode
			} else {
				node = nodes[len(nodes)-1]
			}
			break
		}
		if err != mega.ENOENT {
			return nil, errors.Wrap(err, "mkdir lookup failed")
		}
	}
	if err != nil {
		return nil, errors.Wrap(err, "internal error: mkdir called with non existent root node")
	}
	// i is number of directories to create (may be 0)
	// node is directory to create them from
	for _, name := range parts[len(parts)-i:] {
		// create directory called name in node
		err = f.pacer.Call(func() (bool, error) {
			node, err = f.srv.CreateDir(name, node)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "mkdir create node failed")
		}
	}
	return node, nil
}

// mkdirParent creates the parent directory of remote
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
	rootNode, err := f.findRoot(ctx, true)
	if err != nil {
		return nil, "", err
	}
	parent, leaf := path.Split(remote)
	dirNode, err = f.mkdir(ctx, rootNode, parent)
	return dirNode, leaf, err
}

// findRoot looks up the root directory node and returns it.
//
// if create is true it tries to create the root directory if not found
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
	f.rootNodeMu.Lock()
	defer f.rootNodeMu.Unlock()

	// Check if we haven't found it already
	if f._rootNode != nil {
		return f._rootNode, nil
	}

	// Check for pre-existing root
	absRoot := f.srv.FS.GetRoot()
	node, err := f.findDir(absRoot, f.root)
	//log.Printf("findRoot findDir %p %v", node, err)
	if err == nil {
		f._rootNode = node
		return node, nil
	}
	if !create || err != fs.ErrorDirNotFound {
		return nil, err
	}

	//..not found so create the root directory
	f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
	return f._rootNode, err
}

// clearRoot unsets the root directory
func (f *Fs) clearRoot() {
	f.rootNodeMu.Lock()
	f._rootNode = nil
	f.rootNodeMu.Unlock()
	//log.Printf("cleared root directory")
}

// CleanUp deletes all files currently in trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	trash := f.srv.FS.GetTrash()
	items := []*mega.Node{}
	_, err = f.list(ctx, trash, func(item *mega.Node) bool {
		items = append(items, item)
		return false
	})
	if err != nil {
		return errors.Wrap(err, "CleanUp failed to list items in trash")
	}
	fs.Infof(f, "Deleting %d items from the trash", len(items))
	errors := 0
	// similar to f.deleteNode(trash) but with HardDelete as true
	for _, item := range items {
		fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
		deleteErr := f.pacer.Call(func() (bool, error) {
			err := f.srv.Delete(item, true)
			return shouldRetry(ctx, err)
		})
		if deleteErr != nil {
			err = deleteErr
			errors++
		}
	}
	fs.Infof(f, "Deleted %d items from the trash with %d errors", len(items), errors)
	return err
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is the type of the user function called by list for each node
// found in a directory
//
// It should return true to stop the listing early
type listFn func(*mega.Node) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
	nodes, err := f.srv.FS.GetChildren(dir)
	if err != nil {
		return false, errors.Wrapf(err, "list failed")
	}
	for _, item := range nodes {
		if fn(item) {
			found = true
			break
		}
	}
	return
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	dirNode, err := f.lookupDir(ctx, dir)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
		remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName()))
		switch info.GetType() {
		case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
			d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
			entries = append(entries, d)
		case mega.FILE:
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, dirNode, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
	dirNode, leaf, err = f.mkdirParent(ctx, remote)
	if err != nil {
		return nil, nil, leaf, err
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, dirNode, leaf, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
//
// If an object already exists at the remote it is updated in place,
// otherwise the upload is handed off to PutUnchecked
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src)
	default:
		return nil, err
	}
}

// PutUnchecked the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)

	o, _, _, err := f.createObject(ctx, remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(ctx, in, src, options...)
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	rootNode, err := f.findRoot(ctx, true)
	if err != nil {
		return err
	}
	_, err = f.mkdir(ctx, rootNode, dir)
	return errors.Wrap(err, "Mkdir failed")
}

// deleteNode removes a file or directory, honouring the hard_delete setting
func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
	err = f.pacer.Call(func() (bool, error) {
		err = f.srv.Delete(node, f.opt.HardDelete)
		return shouldRetry(ctx, err)
	})
	return err
}

// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	f.mkdirMu.Lock()
	defer f.mkdirMu.Unlock()

	rootNode, err := f.findRoot(ctx, false)
	if err != nil {
		return err
	}
	dirNode, err := f.findDir(rootNode, dir)
	if err != nil {
		return err
	}

	if check {
		children, err := f.srv.FS.GetChildren(dirNode)
		if err != nil {
			return errors.Wrap(err, "purgeCheck GetChildren failed")
		}
		if len(children) > 0 {
			return fs.ErrorDirectoryNotEmpty
		}
	}
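
	// Start watching for server events so we can wait below for the deletion to be processed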
	waitEvent := f.srv.WaitEventsStart()

	err = f.deleteNode(ctx, dirNode)
	if err != nil {
		return errors.Wrap(err, "delete directory node failed")
	}

	// Remove the root node if we just deleted it
	if dirNode == rootNode {
		f.clearRoot()
	}

	f.srv.WaitEvents(waitEvent, eventWaitTime)
	return nil
}

// Rmdir removes the directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updated
func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
	var (
		dstFs                  = f
		srcDirNode, dstDirNode *mega.Node
		srcParent, dstParent   string
		srcLeaf, dstLeaf       string
	)

	if dstRemote != "" {
		// lookup or create the destination parent directory
		dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
	} else {
		// find or create the parent of the root directory
		absRoot := dstFs.srv.FS.GetRoot()
		dstParent, dstLeaf = path.Split(dstFs.root)
		dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
	}
	if err != nil {
		return errors.Wrap(err, "server-side move failed to make dst parent dir")
	}

	if srcRemote != "" {
		// lookup the existing parent directory
		srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
	} else {
		// lookup the existing root parent
		absRoot := srcFs.srv.FS.GetRoot()
		srcParent, srcLeaf = path.Split(srcFs.root)
		srcDirNode, err = f.findDir(absRoot, srcParent)
	}
	if err != nil {
		return errors.Wrap(err, "server-side move failed to lookup src parent dir")
	}

	// move the object into its new directory if required
	if srcDirNode != dstDirNode && srcDirNode.GetHash() != dstDirNode.GetHash() {
		//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Move(info, dstDirNode)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return errors.Wrap(err, "server-side move failed")
		}
	}
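
	// Start watching for server events so we can wait below for the rename to be processed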
	waitEvent := f.srv.WaitEventsStart()

	// rename the object if required
	if srcLeaf != dstLeaf {
		//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
		err = f.pacer.Call(func() (bool, error) {
			err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return errors.Wrap(err, "server-side rename failed")
		}
	}

	f.srv.WaitEvents(waitEvent, eventWaitTime)

	return nil
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstFs := f

	//log.Printf("Move %q -> %q", src.Remote(), remote)
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Do the move
	err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
	if err != nil {
		return nil, err
	}

	// Create a destination object
	dstObj := &Object{
		fs:     dstFs,
		remote: remote,
		info:   srcObj.info,
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	dstFs := f
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// find the source
	info, err := srcFs.lookupDir(ctx, srcRemote)
	if err != nil {
		return err
	}

	// check the destination doesn't exist
	_, err = dstFs.lookupDir(ctx, dstRemote)
	if err == nil {
		return fs.ErrorDirExists
	} else if err != fs.ErrorDirNotFound {
		return errors.Wrap(err, "DirMove error while checking dest directory")
	}

	// Do the move
	err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
	if err != nil {
		return err
	}

	// Clear src if it was the root
	if srcRemote == "" {
		srcFs.clearRoot()
	}

	return nil
}

// DirCacheFlush an optional interface to flush internal directory cache
func (f *Fs) DirCacheFlush() {
	// f.dirCache.ResetRoot()
	// FIXME Flush the mega somehow?
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	root, err := f.findRoot(ctx, false)
	if err != nil {
		return "", errors.Wrap(err, "PublicLink failed to find root node")
	}
	node, err := f.findNode(root, remote)
	if err != nil {
		return "", errors.Wrap(err, "PublicLink failed to find path")
	}
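	// ask go-mega to include the decryption key in the generated link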
	link, err = f.srv.Link(node, true)
	if err != nil {
		return "", errors.Wrap(err, "PublicLink failed to create link")
	}
	return link, nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	// find dst directory
	dstDir := dirs[0]
	dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
	if dstDirNode == nil {
		return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
	}
	for _, srcDir := range dirs[1:] {
		// find src directory
		srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
		if srcDirNode == nil {
			return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
		}

		// list the objects
		infos := []*mega.Node{}
		_, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
			err = f.pacer.Call(func() (bool, error) {
				err = f.srv.Move(info, dstDirNode)
				return shouldRetry(ctx, err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.deleteNode(ctx, srcDirNode)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var q mega.QuotaResp
	var err error
	err = f.pacer.Call(func() (bool, error) {
		q, err = f.srv.GetQuota()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get Mega Quota")
	}
	usage := &fs.Usage{
		Total: fs.NewUsageValue(int64(q.Mstrg)),           // quota of bytes that can be used
		Used:  fs.NewUsageValue(int64(q.Cstrg)),           // bytes in use
		Free:  fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the hashes of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
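	// mega has its own node hashes (see the open questions at the top of this
	// file) but they don't map onto any hash type that rclone supports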
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.info.GetSize()
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *mega.Node) (err error) {
	if info.GetType() != mega.FILE {
		return fs.ErrorNotAFile
	}
	o.info = info
	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.info != nil {
		return nil
	}
	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			err = fs.ErrorObjectNotFound
		}
		return err
	}
	return o.setMetaData(info)
}

// ModTime returns the modification time of the object
//
// It returns the timestamp stored on the mega node
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.info.GetTimeStamp()
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// openObject represents a download in progress
type openObject struct {
	ctx    context.Context
	mu     sync.Mutex
	o      *Object
	d      *mega.Download
	id     int
	skip   int64
	chunk  []byte
	closed bool
}

// get the next chunk
func (oo *openObject) getChunk(ctx context.Context) (err error) {
	if oo.id >= oo.d.Chunks() {
		return io.EOF
	}
	var chunk []byte
	err = oo.o.fs.pacer.Call(func() (bool, error) {
		chunk, err = oo.d.DownloadChunk(oo.id)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	oo.id++
	oo.chunk = chunk
	return nil
}

// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, errors.New("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		_, size, err := oo.d.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk(oo.ctx)
		if err != nil {
			return 0, err
		}
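		// the requested offset may fall inside the chunk we just fetched, so trim it here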
		if oo.skip > 0 {
			oo.chunk = oo.chunk[oo.skip:]
			oo.skip = 0
		}
	}
	n = copy(p, oo.chunk)
	oo.chunk = oo.chunk[n:]
	return n, nil
}

// Close closes the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return nil
	}
	err = oo.o.fs.pacer.Call(func() (bool, error) {
		err = oo.d.Finish()
		return shouldRetry(oo.ctx, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to finish download")
	}
	oo.closed = true
	return nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	var d *mega.Download
	err = o.fs.pacer.Call(func() (bool, error) {
		d, err = o.fs.srv.NewDownload(o.info)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "open download file failed")
	}

	oo := &openObject{
		ctx:  ctx,
		o:    o,
		d:    d,
		skip: offset,
	}

	return readers.NewLimitedReadCloser(oo, limit), nil
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	if size < 0 {
		return errors.New("mega backend can't upload a file of unknown length")
	}
	//modTime := src.ModTime(ctx)
	remote := o.Remote()

	// Create the parent directory
	dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
	if err != nil {
		return errors.Wrap(err, "update make parent dir failed")
	}

	var u *mega.Upload
	err = o.fs.pacer.Call(func() (bool, error) {
		u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return errors.Wrap(err, "upload file failed to create session")
	}

	// Upload the chunks
	// FIXME do this in parallel
	for id := 0; id < u.Chunks(); id++ {
		_, chunkSize, err := u.ChunkLocation(id)
		if err != nil {
			return errors.Wrap(err, "upload failed to read chunk location")
		}
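		// read exactly chunkSize bytes from the source for this chunk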
		chunk := make([]byte, chunkSize)
		_, err = io.ReadFull(in, chunk)
		if err != nil {
			return errors.Wrap(err, "upload failed to read data")
		}

		err = o.fs.pacer.Call(func() (bool, error) {
			err = u.UploadChunk(id, chunk)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return errors.Wrap(err, "upload file failed to upload chunk")
		}
	}

	// Finish the upload
	var info *mega.Node
	err = o.fs.pacer.Call(func() (bool, error) {
		info, err = u.Finish()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to finish upload")
	}

	// If the upload succeeded and the original object existed, then delete it
	if o.info != nil {
		err = o.fs.deleteNode(ctx, o.info)
		if err != nil {
			return errors.Wrap(err, "upload failed to remove old version")
		}
		o.info = nil
	}

	return o.setMetaData(info)
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	err := o.fs.deleteNode(ctx, o.info)
	if err != nil {
		return errors.Wrap(err, "Remove object failed")
	}
	return nil
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.info.GetHash()
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)