Add pixeldrain backend

This commit adds support for pixeldrain's experimental filesystem API.
Fornax 2024-08-12 14:35:44 +02:00 committed by GitHub
parent bfa3278f30
commit 3b3625037c
12 changed files with 1176 additions and 0 deletions


@@ -93,6 +93,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)


@@ -39,6 +39,7 @@ import (
_ "github.com/rclone/rclone/backend/oracleobjectstorage" _ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak" _ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive" _ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"


@@ -0,0 +1,397 @@
package pixeldrain
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
Path []FilesystemNode `json:"path"`
BaseIndex int `json:"base_index"`
Children []FilesystemNode `json:"children"`
}
// Base returns the base node of the path. This is the node that the path
// points to.
func (fsp *FilesystemPath) Base() FilesystemNode {
return fsp.Path[fsp.BaseIndex]
}
// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as a response from update
// commands, if requested
type FilesystemNode struct {
Type string `json:"type"`
Path string `json:"path"`
Name string `json:"name"`
Created time.Time `json:"created"`
Modified time.Time `json:"modified"`
ModeOctal string `json:"mode_octal"`
// File params
FileSize int64 `json:"file_size"`
FileType string `json:"file_type"`
SHA256Sum string `json:"sha256_sum"`
// ID is only filled in when the file/directory is publicly shared
ID string `json:"id,omitempty"`
}
// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are in chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry
// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred, the path relative to the requested
// directory, and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
Time time.Time `json:"time"`
Path string `json:"path"`
PathNew string `json:"path_new"`
Action string `json:"action"`
Type string `json:"type"`
}
// UserInfo contains information about the logged in user
type UserInfo struct {
Username string `json:"username"`
Subscription SubscriptionType `json:"subscription"`
StorageSpaceUsed int64 `json:"storage_space_used"`
}
// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription, like
// the perks and cost
type SubscriptionType struct {
Name string `json:"name"`
StorageSpace int64 `json:"storage_space"`
}
// APIError is the error type returned by the pixeldrain API
type APIError struct {
StatusCode string `json:"value"`
Message string `json:"message"`
}
func (e APIError) Error() string { return e.StatusCode }
// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
errNotFound = errors.New("pd api: path not found")
errExists = errors.New("pd api: node already exists")
errAuthenticationFailed = errors.New("pd api: authentication failed")
)
func apiErrorHandler(resp *http.Response) (err error) {
var e APIError
if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
return fmt.Errorf("failed to parse error json: %w", err)
}
// We close the body here so that the API handlers can be sure that the
// response body is not still open when an error was returned
if err = resp.Body.Close(); err != nil {
return fmt.Errorf("failed to close resp body: %w", err)
}
if e.StatusCode == "path_not_found" {
return errNotFound
} else if e.StatusCode == "directory_not_empty" {
return fs.ErrorDirectoryNotEmpty
} else if e.StatusCode == "node_already_exists" {
return errExists
} else if e.StatusCode == "authentication_failed" {
return errAuthenticationFailed
} else if e.StatusCode == "permission_denied" {
return fs.ErrorPermissionDenied
}
return e
}
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
}
// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
// can understand.
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
params = make(url.Values)
if modified, ok := meta["mtime"]; ok {
params.Set("modified", modified)
}
if created, ok := meta["btime"]; ok {
params.Set("created", created)
}
if mode, ok := meta["mode"]; ok {
params.Set("mode", mode)
}
if shared, ok := meta["shared"]; ok {
params.Set("shared", shared)
}
if loggingEnabled, ok := meta["logging_enabled"]; ok {
params.Set("logging_enabled", loggingEnabled)
}
return params
}
// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
// Trim the path prefix. The path prefix is hidden from rclone during all
// operations. Saving it here would confuse rclone a lot. So instead we
// strip it here and add it back for every API request we need to perform
node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
return &Object{fs: f, base: node}
}
func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}
func (f *Fs) escapePath(p string) (out string) {
// Add the path prefix, encode all the parts and combine them together
var parts = strings.Split(f.pathPrefix+p, "/")
for i := range parts {
parts[i] = url.PathEscape(parts[i])
}
return strings.Join(parts, "/")
}
func (f *Fs) put(
ctx context.Context,
path string,
body io.Reader,
meta fs.Metadata,
options []fs.OpenOption,
) (node FilesystemNode, err error) {
var params = paramsFromMetadata(meta)
// Tell the server to automatically create parent directories if they don't
// exist yet
params.Set("make_parents", "true")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "PUT",
Path: f.escapePath(path),
Body: body,
Parameters: params,
Options: options,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &rest.Opts{
Method: "GET",
Path: f.escapePath(path),
Options: options,
})
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
return fsp, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
Path: f.escapePath(path),
// To receive node info from the pixeldrain API you need to add the
// ?stat query. Without it pixeldrain will return the file contents if the
// URL points to a file
Parameters: url.Values{"stat": []string{""}},
},
nil,
&fsp,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
return changeLog, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
Path: f.escapePath(""),
Parameters: url.Values{
"change_log": []string{""},
"start": []string{start.Format(time.RFC3339Nano)},
"end": []string{end.Format(time.RFC3339Nano)},
},
},
nil,
&changeLog,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
var params = paramsFromMetadata(fields)
params.Set("action", "update")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
Path: f.escapePath(path),
MultipartParams: params,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
Path: f.escapePath(dir),
MultipartParams: url.Values{"action": []string{"mkdirall"}},
NoResponse: true,
},
nil,
nil,
)
return shouldRetry(ctx, resp, err)
})
}
var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")
// rename renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
srcFs, ok := src.(*Fs)
if !ok {
// This is not a pixeldrain FS, can't move
return node, errIncompatibleSourceFS
} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
// Path is not in the same root dir, can't move
return node, errIncompatibleSourceFS
}
var params = paramsFromMetadata(meta)
params.Set("action", "rename")
// The target is always in our own filesystem so here we use our
// own pathPrefix
params.Set("target", f.pathPrefix+to)
// Create parent directories if the parent directory of the file
// does not exist yet
params.Set("make_parents", "true")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
// Important: We use the source FS path prefix here
Path: srcFs.escapePath(from),
MultipartParams: params,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
var params url.Values
if recursive {
// Tell the server to recursively delete all child files
params = url.Values{"recursive": []string{"true"}}
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "DELETE",
Path: f.escapePath(path),
Parameters: params,
NoResponse: true,
},
nil, nil,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
return user, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
// The default RootURL points at the filesystem endpoint. We can't
// use that to request user information. So here we override it to
// the user endpoint
RootURL: f.opt.APIURL + "/user",
},
nil,
&user,
)
return shouldRetry(ctx, resp, err)
})
}


@@ -0,0 +1,567 @@
// Package pixeldrain provides an interface to the Pixeldrain object storage
// system.
package pixeldrain
import (
"context"
"errors"
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
timeFormat = time.RFC3339Nano
minSleep = pacer.MinSleep(10 * time.Millisecond)
maxSleep = pacer.MaxSleep(1 * time.Second)
decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "pixeldrain",
Description: "Pixeldrain Filesystem",
NewFs: NewFs,
Config: nil,
Options: []fs.Option{{
Name: "api_key",
Help: "API key for your pixeldrain account.\n" +
"Found on https://pixeldrain.com/user/api_keys.",
Sensitive: true,
}, {
Name: "root_folder_id",
Help: "Root of the filesystem to use.\n\n" +
"Set to 'me' to use your personal filesystem. " +
"Set to a shared directory ID to use a shared directory.",
Default: "me",
}, {
Name: "api_url",
Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
"this at default. It is only intended to be changed for testing purposes.",
Default: "https://pixeldrain.com/api",
Advanced: true,
Required: true,
}},
MetadataInfo: &fs.MetadataInfo{
System: map[string]fs.MetadataHelp{
"mode": {
Help: "File mode",
Type: "octal, unix style",
Example: "755",
},
"mtime": {
Help: "Time of last modification",
Type: "RFC 3339",
Example: timeFormat,
},
"btime": {
Help: "Time of file birth (creation)",
Type: "RFC 3339",
Example: timeFormat,
},
},
Help: "Pixeldrain supports file modes and creation times.",
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
RootFolderID string `config:"root_folder_id"`
APIURL string `config:"api_url"`
}
// Fs represents a remote pixeldrain filesystem
type Fs struct {
name string // name of this remote, as given to NewFS
root string // the path we are working on, as given to NewFS
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *fs.Pacer
loggedIn bool // if the user is authenticated
// pathPrefix is the directory we're working in. The pathPrefix is stripped
// from every API response containing a path. The pathPrefix always begins
// and ends with a slash for concatenation convenience
pathPrefix string
}
// Object describes a pixeldrain file
type Object struct {
fs *Fs // what this object is part of
base FilesystemNode // the node this object references
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
}
f.features = (&fs.Features{
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
}).Fill(ctx, f)
// Set the path prefix. This is the path to the root directory on the
// server. We add it to each request and strip it from each response because
// rclone does not want to see it
f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
// The root URL equates to https://pixeldrain.com/api/filesystem during
// normal operation. API handlers need to manually add the pathPrefix to
// each request
f.srv.SetRoot(opt.APIURL + "/filesystem")
// If using an APIKey, set the Authorization header
if len(opt.APIKey) > 0 {
f.srv.SetUserPass("", opt.APIKey)
// Check if credentials are correct
user, err := f.userInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get user data: %w", err)
}
f.loggedIn = true
fs.Infof(f,
"Logged in as '%s', subscription '%s', storage limit %d",
user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
)
}
if !f.loggedIn && opt.RootFolderID == "me" {
return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
}
// Satisfy TestFsIsFile. This test expects that we throw an error if the
// filesystem root is a file
fsp, err := f.stat(ctx, "")
if err != errNotFound && err != nil {
// It doesn't matter if the root directory does not exist, as long as it
// is not a file. This is what the test dictates
return f, err
} else if err == nil && fsp.Base().Type == "file" {
// The filesystem root is a file, rclone wants us to set the root to the
// parent directory
f.root = path.Dir(f.root)
f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
return f, fs.ErrorIsFile
}
return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fsp, err := f.stat(ctx, dir)
if err == errNotFound {
return nil, fs.ErrorDirNotFound
} else if err != nil {
return nil, err
} else if fsp.Base().Type == "file" {
return nil, fs.ErrorIsFile
}
entries = make(fs.DirEntries, len(fsp.Children))
for i := range fsp.Children {
if fsp.Children[i].Type == "dir" {
entries[i] = f.nodeToDirectory(fsp.Children[i])
} else {
entries[i] = f.nodeToObject(fsp.Children[i])
}
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fsp, err := f.stat(ctx, remote)
if err == errNotFound {
return nil, fs.ErrorObjectNotFound
} else if err != nil {
return nil, err
} else if fsp.Base().Type == "dir" {
return nil, fs.ErrorIsDir
}
return f.nodeToObject(fsp.Base()), nil
}
// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to get object metadata")
}
// Overwrite the mtime if it was not already set in the metadata
if _, ok := meta["mtime"]; !ok {
if meta == nil {
meta = make(fs.Metadata)
}
meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
}
node, err := f.put(ctx, src.Remote(), in, meta, options)
if err != nil {
return nil, fmt.Errorf("failed to put object: %w", err)
}
return f.nodeToObject(node), nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
err = f.mkdir(ctx, dir)
if err == errNotFound {
return fs.ErrorDirNotFound
} else if err == errExists {
// Spec says we do not return an error if the directory already exists
return nil
}
return err
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
err = f.delete(ctx, dir, false)
if err == errNotFound {
return fs.ErrorDirNotFound
}
return err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
err = f.delete(ctx, dir, true)
if err == errNotFound {
return fs.ErrorDirNotFound
}
return err
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
// This is not a pixeldrain object. Can't move
return nil, fs.ErrorCantMove
}
node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
if err == errIncompatibleSourceFS {
return nil, fs.ErrorCantMove
} else if err == errNotFound {
return nil, fs.ErrorObjectNotFound
}
return f.nodeToObject(node), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
if err == errIncompatibleSourceFS {
return fs.ErrorCantDirMove
} else if err == errNotFound {
return fs.ErrorDirNotFound
} else if err == errExists {
return fs.ErrorDirExists
}
return err
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
// If the root folder ID is not /me we need to explicitly enable change logging
// for this directory or file
if f.pathPrefix != "/me/" {
_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
if err != nil {
fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
}
}
go f.changeNotify(ctx, notify, newInterval)
}
func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
var ticker = time.NewTicker(<-newInterval)
var lastPoll = time.Now()
for {
select {
case dur, ok := <-newInterval:
if !ok {
ticker.Stop()
return
}
fs.Debugf(f, "Polling changes at an interval of %s", dur)
ticker.Reset(dur)
case t := <-ticker.C:
clog, err := f.changeLog(ctx, lastPoll, t)
if err != nil {
fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
continue
}
for i := range clog {
fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)
if clog[i].Type == "dir" {
notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
} else if clog[i].Type == "file" {
notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
}
}
lastPoll = t
}
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Put already supports streaming so we just use that
return f.Put(ctx, in, src, options...)
}
// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
return err
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
if err != nil {
return "", err
}
if fsn.ID != "" {
return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
}
return "", nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
user, err := f.userInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}
if user.Subscription.StorageSpace > -1 {
usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
}
return usage, nil
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
if err == nil {
o.base.Modified = modTime
}
return err
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
return o.fs.read(ctx, o.base.Path, options)
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// Copy the parameters and update the object
o.base.Modified = src.ModTime(ctx)
o.base.FileSize = src.Size()
o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
_, err = o.fs.Put(ctx, in, o, options...)
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.base.Path, false)
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the SHA-256 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA256 {
return "", hash.ErrUnsupported
}
return o.base.SHA256Sum, nil
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.base.Path
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.base.Path
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.base.Modified
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.base.FileSize
}
// MimeType returns the content type of the Object if known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
return o.base.FileType
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return fs.Metadata{
"mode": o.base.ModeOctal,
"mtime": o.base.Modified.Format(timeFormat),
"btime": o.base.Created.Format(timeFormat),
}, nil
}
// Verify that all the interfaces are implemented correctly
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Info = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.DirEntry = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
)


@@ -0,0 +1,18 @@
// Test pixeldrain filesystem interface
package pixeldrain_test
import (
"testing"
"github.com/rclone/rclone/backend/pixeldrain"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPixeldrain:",
NilObject: (*pixeldrain.Object)(nil),
SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
})
}
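
The integration tests use rclone's standard fstest harness, so they should be runnable like any other backend's tests; a sketch, assuming a `TestPixeldrain:` remote is configured and the command is run from the `backend/pixeldrain` directory:

```
go test -v -remote TestPixeldrain:
```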


@@ -69,6 +69,7 @@ docs = [
"swift.md", "swift.md",
"pcloud.md", "pcloud.md",
"pikpak.md", "pikpak.md",
"pixeldrain.md",
"premiumizeme.md", "premiumizeme.md",
"protondrive.md", "protondrive.md",
"putio.md", "putio.md",


@@ -161,6 +161,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}} {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}} {{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="PikPak" home="https://mypikpak.com/" config="/pikpak/" >}} {{< provider name="PikPak" home="https://mypikpak.com/" config="/pikpak/" >}}
{{< provider name="Pixeldrain" home="https://pixeldrain.com/" config="/pixeldrain/" >}}
{{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}} {{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}}
{{< provider name="put.io" home="https://put.io/" config="/putio/" >}} {{< provider name="put.io" home="https://put.io/" config="/putio/" >}}
{{< provider name="Proton Drive" home="https://proton.me/drive" config="/protondrive/" >}} {{< provider name="Proton Drive" home="https://proton.me/drive" config="/protondrive/" >}}


@@ -65,6 +65,7 @@ See the following for detailed instructions for
* [Oracle Object Storage](/oracleobjectstorage/)
* [Pcloud](/pcloud/)
* [PikPak](/pikpak/)
* [Pixeldrain](/pixeldrain/)
* [premiumize.me](/premiumizeme/)
* [put.io](/putio/)
* [Proton Drive](/protondrive/)


@@ -46,6 +46,7 @@ Here is an overview of the major features of each cloud storage system.
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
| PikPak | MD5 | R | No | No | R | - |
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
| premiumize.me | - | - | Yes | No | R | - |
| put.io | CRC-32 | R/W | No | Yes | R | - |
| Proton Drive | SHA1 | R/W | No | No | R | - |
@@ -520,6 +521,7 @@ upon backend-specific capabilities.
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | No | Yes | Yes |
| Proton Drive | Yes | No | Yes | Yes | Yes | No | No | No | No | Yes | Yes |

docs/content/pixeldrain.md

@@ -0,0 +1,181 @@
---
title: "Pixeldrain"
description: "Rclone docs for Pixeldrain"
versionIntroduced: "v1.68"
---
# {{< icon "fa fa-circle" >}} Pixeldrain
This is the backend for Pixeldrain's premium filesystem feature. This is not the
same as pixeldrain's free file sharing product. The filesystem requires either a
Pro subscription or the Prepaid plan. [More information on
subscriptions](https://pixeldrain.com/#pro).
An overview of the filesystem's features and limitations is available in the
[filesystem guide](https://pixeldrain.com/filesystem) on pixeldrain.
### Usage with account
To use the personal filesystem you will need a [pixeldrain
account](https://pixeldrain.com/register) and either the Prepaid plan or one of
the Patreon-based subscriptions. After registering and subscribing, your
personal filesystem will be available at this link: https://pixeldrain.com/d/me.
Go to the [API keys page](https://pixeldrain.com/user/api_keys) on your account
and generate a new API key for rclone. Then run `rclone config` and use the API
key to create a new backend.
Example:
```
No remotes found, make a new one?
n) New remote
d) Delete remote
c) Copy remote
s) Set configuration password
q) Quit config
n/d/c/s/q> n
Enter name for new remote.
name> pixeldrainfs
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
...
XX / Pixeldrain Filesystem
\ (pixeldrain)
...
Storage> pixeldrain
Option api_key.
API key for your pixeldrain account.
Found on https://pixeldrain.com/user/api_keys.
Enter a value. Press Enter to leave empty.
api_key> b1bb1e81-9b7b-406b-986a-c9b20be76e15
Option root_folder_id.
Root of the filesystem to use. Set to 'me' to use your personal filesystem.
Set to a shared directory ID to use a shared directory.
Enter a string value. Press Enter for the default (me).
root_folder_id>
Edit advanced config?
y) Yes
n) No (default)
y/n>
Configuration complete.
Options:
- type: pixeldrain
- api_key: b1bb1e81-9b7b-406b-986a-c9b20be76e15
Keep this "pixeldrainfs" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>
Current remotes:
Name Type
==== ====
pixeldrainfs pixeldrain
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> q
```
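Once configured, the remote works like any other rclone remote. For example, to list files in it or copy a local directory into it (the `pixeldrainfs` name is from the walkthrough above; the `backup` path is illustrative):
```
rclone ls pixeldrainfs:
rclone copy /local/path pixeldrainfs:backup
```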
### Usage without account
It is possible to gain read-only access to publicly shared directories through
rclone. For this you only need a directory ID. The directory ID can be found in
the URL of a shared directory; the URL will look like
`https://pixeldrain.com/d/abcd1234`, where `abcd1234` is the directory ID.
Directory IDs in your own filesystem can also be listed with the `lsf` command:
`rclone lsf Pixeldrain: --dirs-only -Fpi`
This will print directories in your `Pixeldrain` home directory and their public
IDs.
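The output is one line per directory, with the path and ID separated by rclone's default `;` separator; something like this (the ID shown is made up for illustration):
```
MyFolder/;abcd1234
```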
Enter this directory ID in the rclone config and you will be able to access the
directory.
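For example, a read-only remote for a hypothetical shared directory with ID `abcd1234` would look like this in `rclone.conf` (no `api_key` is needed for public directories):
```
[pixeldrainshared]
type = pixeldrain
root_folder_id = abcd1234
```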
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/pixeldrain/pixeldrain.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to pixeldrain (Pixeldrain Filesystem).
#### --pixeldrain-api-key
API key for your pixeldrain account.
Found on https://pixeldrain.com/user/api_keys.
Properties:
- Config: api_key
- Env Var: RCLONE_PIXELDRAIN_API_KEY
- Type: string
- Required: false
#### --pixeldrain-root-folder-id
Root of the filesystem to use.
Set to 'me' to use your personal filesystem. Set to a shared directory ID to use a shared directory.
Properties:
- Config: root_folder_id
- Env Var: RCLONE_PIXELDRAIN_ROOT_FOLDER_ID
- Type: string
- Default: "me"
### Advanced options
Here are the Advanced options specific to pixeldrain (Pixeldrain Filesystem).
#### --pixeldrain-api-url
The API endpoint to connect to. In the vast majority of cases it's fine to leave
this at default. It is only intended to be changed for testing purposes.
Properties:
- Config: api_url
- Env Var: RCLONE_PIXELDRAIN_API_URL
- Type: string
- Default: "https://pixeldrain.com/api"
#### --pixeldrain-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_PIXELDRAIN_DESCRIPTION
- Type: string
- Required: false
### Metadata
Pixeldrain supports file modes and creation times.
Here are the possible system metadata items for the pixeldrain backend.
| Name | Help | Type | Example | Read Only |
|------|------|------|---------|-----------|
| btime | Time of file birth (creation) | RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 | N |
| mode | File mode | octal, unix style | 755 | N |
| mtime | Time of last modification | RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 | N |
See the [metadata](/docs/#metadata) docs for more info.
{{< rem autogenerated options stop >}}
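The system metadata items above can be inspected with `rclone lsjson` and its `--metadata` flag; a quick illustrative check, assuming the `pixeldrainfs` remote from the example above and a hypothetical file path:
```
rclone lsjson --metadata pixeldrainfs:path/to/file.txt
```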


@@ -90,6 +90,7 @@
<a class="dropdown-item" href="/oracleobjectstorage/"><i class="fa fa-cloud fa-fw"></i> Oracle Object Storage</a> <a class="dropdown-item" href="/oracleobjectstorage/"><i class="fa fa-cloud fa-fw"></i> Oracle Object Storage</a>
<a class="dropdown-item" href="/pcloud/"><i class="fa fa-cloud fa-fw"></i> pCloud</a> <a class="dropdown-item" href="/pcloud/"><i class="fa fa-cloud fa-fw"></i> pCloud</a>
<a class="dropdown-item" href="/pikpak/"><i class="fa fa-cloud fa-fw"></i> PikPak</a> <a class="dropdown-item" href="/pikpak/"><i class="fa fa-cloud fa-fw"></i> PikPak</a>
<a class="dropdown-item" href="/pixeldrain/"><i class="fa fa-circle fa-fw"></i> Pixeldrain</a>
<a class="dropdown-item" href="/premiumizeme/"><i class="fa fa-user fa-fw"></i> premiumize.me</a> <a class="dropdown-item" href="/premiumizeme/"><i class="fa fa-user fa-fw"></i> premiumize.me</a>
<a class="dropdown-item" href="/putio/"><i class="fas fa-parking fa-fw"></i> put.io</a> <a class="dropdown-item" href="/putio/"><i class="fas fa-parking fa-fw"></i> put.io</a>
<a class="dropdown-item" href="/protondrive/"><i class="fas fa-folder fa-fw"></i> Proton Drive</a> <a class="dropdown-item" href="/protondrive/"><i class="fas fa-folder fa-fw"></i> Proton Drive</a>


@@ -364,6 +364,11 @@ backends:
# This test fails with message
# "share_status_prohibited" (9): Sorry, the sharing service is under maintenance in the current region.
- TestIntegration/FsMkdir/FsPutFiles/PublicLink
- backend: "pixeldrain"
remote: "TestPixeldrain:"
ignore:
- TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
fastlist: false
- backend: "webdav" - backend: "webdav"
remote: "TestWebdavNextcloud:" remote: "TestWebdavNextcloud:"
ignore: ignore: