backend: adjust backends to have encoding parameter
Fixes #3761 Fixes #3836 Fixes #3841
This commit is contained in:
parent 0a5c83ece1
commit 3c620d521d
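This commit applies the same mechanical change to every backend: the filename encoding, previously pinned at compile time by a package-level constant (const enc = encodings.X), becomes an ordinary advanced config option decoded into each backend's Options struct, so it can be overridden per remote. The sketch below condenses the pattern once, using the same rclone APIs that appear in the hunks (fs.Register, config.ConfigEncoding, encoder.MultiEncoder); "mybackend" is a placeholder name, not a real backend, and encodings.Box is borrowed as an example default.

package mybackend

import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/encodings"
    "github.com/rclone/rclone/lib/encoder"
)

// The encoding becomes an ordinary advanced option. Its default is the
// backend's old compile-time constant, so existing remotes behave exactly
// as before unless the user sets "encoding" explicitly.
func init() {
    fs.Register(&fs.RegInfo{
        Name: "mybackend", // placeholder for this sketch
        Options: []fs.Option{{
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            Default:  encodings.Box, // each backend supplies its own default
        }},
    })
}

// Options receives the decoded config; the `config:"encoding"` tag wires
// the option above into Enc when configstruct builds the struct.
type Options struct {
    Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs carries the decoded options, so call sites reach the encoder through
// the instance rather than through a package-level constant.
type Fs struct {
    opt Options
}

// encodeLeaf shows the new call-site shape: f.opt.Enc replaces enc.
func (f *Fs) encodeLeaf(leaf string) string {
    return f.opt.Enc.FromStandardName(leaf)
}

Because every Default is the backend's old constant, behaviour is unchanged out of the box; the option value uses the comma-separated encoder flag syntax understood by encoder.MultiEncoder.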
backend/amazonclouddrive/amazonclouddrive.go

@@ -33,13 +33,13 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"
 )

 const (
-	enc             = encodings.AmazonCloudDrive
 	folderKind      = "FOLDER"
 	fileKind        = "FILE"
 	statusAvailable = "AVAILABLE"

@@ -137,15 +137,21 @@ which downloads the file through a temporary URL directly from the
 underlying S3 storage.`,
 			Default:  defaultTempLinkThreshold,
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.AmazonCloudDrive,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	Checkpoint        string        `config:"checkpoint"`
-	UploadWaitPerGB   fs.Duration   `config:"upload_wait_per_gb"`
-	TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
+	Checkpoint        string               `config:"checkpoint"`
+	UploadWaitPerGB   fs.Duration          `config:"upload_wait_per_gb"`
+	TempLinkThreshold fs.SizeSuffix        `config:"templink_threshold"`
+	Enc               encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote acd server

@@ -386,7 +392,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	var resp *http.Response
 	var subFolder *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
+		subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {

@@ -413,7 +419,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	var resp *http.Response
 	var info *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
+		info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {

@@ -481,7 +487,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 			if !hasValidParent {
 				continue
 			}
-			*node.Name = enc.ToStandardName(*node.Name)
+			*node.Name = f.opt.Enc.ToStandardName(*node.Name)
 			// Store the nodes up in case we have to retry the listing
 			out = append(out, node)
 		}

@@ -671,7 +677,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	err = f.pacer.CallNoRetry(func() (bool, error) {
 		start := time.Now()
 		f.tokenRenewer.Start()
-		info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
+		info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
 		f.tokenRenewer.Stop()
 		var ok bool
 		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))

@@ -1041,7 +1047,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	var resp *http.Response
 	var info *acd.File
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
+		info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {

@@ -1161,7 +1167,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
 func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
+		newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
 		return f.shouldRetry(resp, err)
 	})
 	return newInfo, err

@@ -1357,7 +1363,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
 			if len(node.Parents) > 0 {
 				if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
 					// and append the drive file name to compute the full file name
-					name := enc.ToStandardName(*node.Name)
+					name := f.opt.Enc.ToStandardName(*node.Name)
 					if len(path) > 0 {
 						path = path + "/" + name
 					} else {
backend/azureblob/azureblob.go

@@ -26,6 +26,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/encodings"

@@ -34,6 +35,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 )

@@ -61,8 +63,6 @@ const (
 	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
 )

-const enc = encodings.AzureBlob
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{

@@ -127,21 +127,27 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
 operations from remote will not be allowed. User should first restore by
 tiering blob to "Hot" or "Cool".`,
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.AzureBlob,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	Account       string        `config:"account"`
-	Key           string        `config:"key"`
-	Endpoint      string        `config:"endpoint"`
-	SASURL        string        `config:"sas_url"`
-	UploadCutoff  fs.SizeSuffix `config:"upload_cutoff"`
-	ChunkSize     fs.SizeSuffix `config:"chunk_size"`
-	ListChunkSize uint          `config:"list_chunk"`
-	AccessTier    string        `config:"access_tier"`
-	UseEmulator   bool          `config:"use_emulator"`
+	Account       string               `config:"account"`
+	Key           string               `config:"key"`
+	Endpoint      string               `config:"endpoint"`
+	SASURL        string               `config:"sas_url"`
+	UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
+	ChunkSize     fs.SizeSuffix        `config:"chunk_size"`
+	ListChunkSize uint                 `config:"list_chunk"`
+	AccessTier    string               `config:"access_tier"`
+	UseEmulator   bool                 `config:"use_emulator"`
+	Enc           encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote azure server

@@ -212,7 +218,7 @@ func parsePath(path string) (root string) {
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
 	containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
-	return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
+	return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
 }

 // split returns container and containerPath from the object

@@ -588,7 +594,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 			// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 			// 	return nil
 			// }
-			remote := enc.ToStandardPath(file.Name)
+			remote := f.opt.Enc.ToStandardPath(file.Name)
 			if !strings.HasPrefix(remote, prefix) {
 				fs.Debugf(f, "Odd name received %q", remote)
 				continue

@@ -609,7 +615,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 		// Send the subdirectories
 		for _, remote := range response.Segment.BlobPrefixes {
 			remote := strings.TrimRight(remote.Name, "/")
-			remote = enc.ToStandardPath(remote)
+			remote = f.opt.Enc.ToStandardPath(remote)
 			if !strings.HasPrefix(remote, prefix) {
 				fs.Debugf(f, "Odd directory name received %q", remote)
 				continue

@@ -673,7 +679,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 		return entries, nil
 	}
 	err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
-		d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified)
+		d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), container.Properties.LastModified)
 		f.cache.MarkOK(container.Name)
 		entries = append(entries, d)
 		return nil
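A design point worth noting for the bucket-based backends (Azure Blob here, B2 and Google Cloud Storage below): the encoder is applied at a single choke point, the split helper that turns an rclone path into a container or bucket name plus object path, so everything downstream already works with wire-format names. Restating the hunk above in isolation:

// split converts a path relative to f.root into an encoded container name
// and container path. Encoding happens exactly once, on the way out to the
// API; listings apply the inverse ToStandardName/ToStandardPath so names
// round-trip.
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
    containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
    return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}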
backend/b2/b2.go

@@ -23,6 +23,7 @@ import (
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/encodings"

@@ -31,12 +32,11 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
 )

-const enc = encodings.B2
-
 const (
 	defaultEndpoint = "https://api.backblazeb2.com"
 	headerPrefix    = "x-bz-info-" // lower case as that is what the server returns

@@ -146,23 +146,29 @@ The duration before the download authorization token will expire.
 The minimum value is 1 second. The maximum value is one week.`,
 			Default:  fs.Duration(7 * 24 * time.Hour),
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.B2,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	Account                       string        `config:"account"`
-	Key                           string        `config:"key"`
-	Endpoint                      string        `config:"endpoint"`
-	TestMode                      string        `config:"test_mode"`
-	Versions                      bool          `config:"versions"`
-	HardDelete                    bool          `config:"hard_delete"`
-	UploadCutoff                  fs.SizeSuffix `config:"upload_cutoff"`
-	ChunkSize                     fs.SizeSuffix `config:"chunk_size"`
-	DisableCheckSum               bool          `config:"disable_checksum"`
-	DownloadURL                   string        `config:"download_url"`
-	DownloadAuthorizationDuration fs.Duration   `config:"download_auth_duration"`
+	Account                       string               `config:"account"`
+	Key                           string               `config:"key"`
+	Endpoint                      string               `config:"endpoint"`
+	TestMode                      string               `config:"test_mode"`
+	Versions                      bool                 `config:"versions"`
+	HardDelete                    bool                 `config:"hard_delete"`
+	UploadCutoff                  fs.SizeSuffix        `config:"upload_cutoff"`
+	ChunkSize                     fs.SizeSuffix        `config:"chunk_size"`
+	DisableCheckSum               bool                 `config:"disable_checksum"`
+	DownloadURL                   string               `config:"download_url"`
+	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
+	Enc                           encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote b2 server

@@ -402,7 +408,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	// If this is a key limited to a single bucket, it must exist already
 	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
-		allowedBucket := enc.ToStandardName(f.info.Allowed.BucketName)
+		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
 		if allowedBucket == "" {
 			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}

@@ -623,11 +629,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	var request = api.ListFileNamesRequest{
 		BucketID:     bucketID,
 		MaxFileCount: chunkSize,
-		Prefix:       enc.FromStandardPath(directory),
+		Prefix:       f.opt.Enc.FromStandardPath(directory),
 		Delimiter:    delimiter,
 	}
 	if directory != "" {
-		request.StartFileName = enc.FromStandardPath(directory)
+		request.StartFileName = f.opt.Enc.FromStandardPath(directory)
 	}
 	opts := rest.Opts{
 		Method: "POST",

@@ -647,7 +653,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	}
 	for i := range response.Files {
 		file := &response.Files[i]
-		file.Name = enc.ToStandardPath(file.Name)
+		file.Name = f.opt.Enc.ToStandardPath(file.Name)
 		// Finish if file name no longer has prefix
 		if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 			return nil

@@ -848,7 +854,7 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
 	f._bucketType = make(map[string]string, 1)
 	for i := range response.Buckets {
 		bucket := &response.Buckets[i]
-		bucket.Name = enc.ToStandardName(bucket.Name)
+		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
 		f.cache.MarkOK(bucket.Name)
 		f._bucketID[bucket.Name] = bucket.ID
 		f._bucketType[bucket.Name] = bucket.Type

@@ -970,7 +976,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 	}
 	var request = api.CreateBucketRequest{
 		AccountID: f.info.AccountID,
-		Name:      enc.FromStandardName(bucket),
+		Name:      f.opt.Enc.FromStandardName(bucket),
 		Type:      "allPrivate",
 	}
 	var response api.Bucket

@@ -1054,7 +1060,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
 	}
 	var request = api.HideFileRequest{
 		BucketID: bucketID,
-		Name:     enc.FromStandardPath(bucketPath),
+		Name:     f.opt.Enc.FromStandardPath(bucketPath),
 	}
 	var response api.File
 	err = f.pacer.Call(func() (bool, error) {

@@ -1082,7 +1088,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
 	}
 	var request = api.DeleteFileRequest{
 		ID:   ID,
-		Name: enc.FromStandardPath(Name),
+		Name: f.opt.Enc.FromStandardPath(Name),
 	}
 	var response api.File
 	err := f.pacer.Call(func() (bool, error) {

@@ -1220,7 +1226,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	var request = api.CopyFileRequest{
 		SourceID:          srcObj.id,
-		Name:              enc.FromStandardPath(dstPath),
+		Name:              f.opt.Enc.FromStandardPath(dstPath),
 		MetadataDirective: "COPY",
 		DestBucketID:      destBucketID,
 	}

@@ -1268,7 +1274,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 	}
 	var request = api.GetDownloadAuthorizationRequest{
 		BucketID:               bucketID,
-		FileNamePrefix:         enc.FromStandardPath(path.Join(f.root, remote)),
+		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
 		ValidDurationInSeconds: validDurationInSeconds,
 	}
 	var response api.GetDownloadAuthorizationResponse

@@ -1509,7 +1515,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	var request = api.CopyFileRequest{
 		SourceID:          o.id,
-		Name:              enc.FromStandardPath(bucketPath), // copy to same name
+		Name:              o.fs.opt.Enc.FromStandardPath(bucketPath), // copy to same name
 		MetadataDirective: "REPLACE",
 		ContentType:       info.ContentType,
 		Info:              info.Info,

@@ -1611,7 +1617,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
 	} else {
 		bucket, bucketPath := o.split()
-		opts.Path += "/file/" + urlEncode(enc.FromStandardName(bucket)) + "/" + urlEncode(enc.FromStandardPath(bucketPath))
+		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {

@@ -1808,7 +1814,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Body: in,
 		ExtraHeaders: map[string]string{
 			"Authorization":  upload.AuthorizationToken,
-			"X-Bz-File-Name": urlEncode(enc.FromStandardPath(bucketPath)),
+			"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
 			"Content-Type":   fs.MimeType(ctx, src),
 			sha1Header:       calculatedSha1,
 			timeHeader:       timeString(modTime),
backend/b2/upload.go

@@ -111,7 +111,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 	}
 	var request = api.StartLargeFileRequest{
 		BucketID:    bucketID,
-		Name:        enc.FromStandardPath(bucketPath),
+		Name:        f.opt.Enc.FromStandardPath(bucketPath),
 		ContentType: fs.MimeType(ctx, src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),
backend/box/box.go

@@ -25,6 +25,7 @@ import (
 	"strings"
 	"time"

+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/jwtutil"

 	"github.com/youmark/pkcs8"

@@ -48,8 +49,6 @@ import (
 	"golang.org/x/oauth2/jws"
 )

-const enc = encodings.Box
-
 const (
 	rcloneClientID              = "d0374ba6pgmaguie02ge15sv1mllndho"
 	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"

@@ -146,6 +145,11 @@ func init() {
 			Help:     "Max number of times to try committing a multipart file.",
 			Default:  100,
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.Box,
 		}},
 	})
 }

@@ -220,8 +224,9 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err

 // Options defines the configuration for this backend
 type Options struct {
-	UploadCutoff  fs.SizeSuffix `config:"upload_cutoff"`
-	CommitRetries int           `config:"commit_retries"`
+	UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
+	CommitRetries int                  `config:"commit_retries"`
+	Enc           encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote box

@@ -488,7 +493,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		Parameters: fieldsValue(),
 	}
 	mkdir := api.CreateFolder{
-		Name: enc.FromStandardName(leaf),
+		Name: f.opt.Enc.FromStandardName(leaf),
 		Parent: api.Parent{
 			ID: pathID,
 		},

@@ -554,7 +559,7 @@ OUTER:
 			if item.ItemStatus != api.ItemStatusActive {
 				continue
 			}
-			item.Name = enc.ToStandardName(item.Name)
+			item.Name = f.opt.Enc.ToStandardName(item.Name)
 			if fn(item) {
 				found = true
 				break OUTER

@@ -791,7 +796,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		Parameters: fieldsValue(),
 	}
 	copyFile := api.CopyFile{
-		Name: enc.FromStandardName(leaf),
+		Name: f.opt.Enc.FromStandardName(leaf),
 		Parent: api.Parent{
 			ID: directoryID,
 		},

@@ -830,7 +835,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
 		Parameters: fieldsValue(),
 	}
 	move := api.UpdateFileMove{
-		Name: enc.FromStandardName(leaf),
+		Name: f.opt.Enc.FromStandardName(leaf),
 		Parent: api.Parent{
 			ID: directoryID,
 		},

@@ -1155,7 +1160,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // This is recommended for less than 50 MB of content
 func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
 	upload := api.UploadFile{
-		Name:              enc.FromStandardName(leaf),
+		Name:              o.fs.opt.Enc.FromStandardName(leaf),
 		ContentModifiedAt: api.Time(modTime),
 		ContentCreatedAt:  api.Time(modTime),
 		Parent: api.Parent{
backend/box/upload.go

@@ -38,7 +38,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
 	} else {
 		opts.Path = "/files/upload_sessions"
 		request.FolderID = directoryID
-		request.FileName = enc.FromStandardName(leaf)
+		request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
backend/drive/drive.go

@@ -39,6 +39,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"

@@ -49,8 +50,6 @@ import (
 	"google.golang.org/api/googleapi"
 )

-const enc = encodings.Drive
-
 // Constants
 const (
 	rcloneClientID = "202264815644.apps.googleusercontent.com"

@@ -456,6 +455,11 @@ Google don't document so it may break in the future.
 See: https://github.com/rclone/rclone/issues/3857
 `,
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.Drive,
 		}},
 	})

@@ -475,37 +479,38 @@ See: https://github.com/rclone/rclone/issues/3857

 // Options defines the configuration for this backend
 type Options struct {
-	Scope                     string        `config:"scope"`
-	RootFolderID              string        `config:"root_folder_id"`
-	ServiceAccountFile        string        `config:"service_account_file"`
-	ServiceAccountCredentials string        `config:"service_account_credentials"`
-	TeamDriveID               string        `config:"team_drive"`
-	AuthOwnerOnly             bool          `config:"auth_owner_only"`
-	UseTrash                  bool          `config:"use_trash"`
-	SkipGdocs                 bool          `config:"skip_gdocs"`
-	SkipChecksumGphotos       bool          `config:"skip_checksum_gphotos"`
-	SharedWithMe              bool          `config:"shared_with_me"`
-	TrashedOnly               bool          `config:"trashed_only"`
-	Extensions                string        `config:"formats"`
-	ExportExtensions          string        `config:"export_formats"`
-	ImportExtensions          string        `config:"import_formats"`
-	AllowImportNameChange     bool          `config:"allow_import_name_change"`
-	UseCreatedDate            bool          `config:"use_created_date"`
-	UseSharedDate             bool          `config:"use_shared_date"`
-	ListChunk                 int64         `config:"list_chunk"`
-	Impersonate               string        `config:"impersonate"`
-	AlternateExport           bool          `config:"alternate_export"`
-	UploadCutoff              fs.SizeSuffix `config:"upload_cutoff"`
-	ChunkSize                 fs.SizeSuffix `config:"chunk_size"`
-	AcknowledgeAbuse          bool          `config:"acknowledge_abuse"`
-	KeepRevisionForever       bool          `config:"keep_revision_forever"`
-	SizeAsQuota               bool          `config:"size_as_quota"`
-	V2DownloadMinSize         fs.SizeSuffix `config:"v2_download_min_size"`
-	PacerMinSleep             fs.Duration   `config:"pacer_min_sleep"`
-	PacerBurst                int           `config:"pacer_burst"`
-	ServerSideAcrossConfigs   bool          `config:"server_side_across_configs"`
-	DisableHTTP2              bool          `config:"disable_http2"`
-	StopOnUploadLimit         bool          `config:"stop_on_upload_limit"`
+	Scope                     string               `config:"scope"`
+	RootFolderID              string               `config:"root_folder_id"`
+	ServiceAccountFile        string               `config:"service_account_file"`
+	ServiceAccountCredentials string               `config:"service_account_credentials"`
+	TeamDriveID               string               `config:"team_drive"`
+	AuthOwnerOnly             bool                 `config:"auth_owner_only"`
+	UseTrash                  bool                 `config:"use_trash"`
+	SkipGdocs                 bool                 `config:"skip_gdocs"`
+	SkipChecksumGphotos       bool                 `config:"skip_checksum_gphotos"`
+	SharedWithMe              bool                 `config:"shared_with_me"`
+	TrashedOnly               bool                 `config:"trashed_only"`
+	Extensions                string               `config:"formats"`
+	ExportExtensions          string               `config:"export_formats"`
+	ImportExtensions          string               `config:"import_formats"`
+	AllowImportNameChange     bool                 `config:"allow_import_name_change"`
+	UseCreatedDate            bool                 `config:"use_created_date"`
+	UseSharedDate             bool                 `config:"use_shared_date"`
+	ListChunk                 int64                `config:"list_chunk"`
+	Impersonate               string               `config:"impersonate"`
+	AlternateExport           bool                 `config:"alternate_export"`
+	UploadCutoff              fs.SizeSuffix        `config:"upload_cutoff"`
+	ChunkSize                 fs.SizeSuffix        `config:"chunk_size"`
+	AcknowledgeAbuse          bool                 `config:"acknowledge_abuse"`
+	KeepRevisionForever       bool                 `config:"keep_revision_forever"`
+	SizeAsQuota               bool                 `config:"size_as_quota"`
+	V2DownloadMinSize         fs.SizeSuffix        `config:"v2_download_min_size"`
+	PacerMinSleep             fs.Duration          `config:"pacer_min_sleep"`
+	PacerBurst                int                  `config:"pacer_burst"`
+	ServerSideAcrossConfigs   bool                 `config:"server_side_across_configs"`
+	DisableHTTP2              bool                 `config:"disable_http2"`
+	StopOnUploadLimit         bool                 `config:"stop_on_upload_limit"`
+	Enc                       encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote drive server

@@ -677,7 +682,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	}
 	var stems []string
 	if title != "" {
-		searchTitle := enc.FromStandardName(title)
+		searchTitle := f.opt.Enc.FromStandardName(title)
 		// Escaping the backslash isn't documented but seems to work
 		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
 		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)

@@ -751,7 +756,7 @@ OUTER:
 			return false, errors.Wrap(err, "couldn't list directory")
 		}
 		for _, item := range files.Files {
-			item.Name = enc.ToStandardName(item.Name)
+			item.Name = f.opt.Enc.ToStandardName(item.Name)
 			// Check the case of items is correct since
 			// the `=` operator is case insensitive.
 			if title != "" && title != item.Name {

@@ -1313,7 +1318,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin

 // CreateDir makes a directory with pathID as parent and name leaf
 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
-	leaf = enc.FromStandardName(leaf)
+	leaf = f.opt.Enc.FromStandardName(leaf)
 	// fmt.Println("Making", path)
 	// Define the metadata for the directory we are going to create.
 	createInfo := &drive.File{

@@ -1771,7 +1776,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 		return nil, err
 	}

-	leaf = enc.FromStandardName(leaf)
+	leaf = f.opt.Enc.FromStandardName(leaf)
 	// Define the metadata for the file we are going to create.
 	createInfo := &drive.File{
 		Name: leaf,

@@ -2470,7 +2475,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.

 		// find the new path
 		if change.File != nil {
-			change.File.Name = enc.ToStandardName(change.File.Name)
+			change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
 			changeType := fs.EntryDirectory
 			if change.File.MimeType != driveFolderType {
 				changeType = fs.EntryObject
backend/dropbox/dropbox.go

@@ -48,14 +48,13 @@ import (
 	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 	"golang.org/x/oauth2"
 )

-const enc = encodings.Dropbox
-
 // Constants
 const (
 	rcloneClientID = "5jcck7diasz0rqy"

@@ -147,14 +146,20 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 			Help:     "Impersonate this user when using a business account.",
 			Default:  "",
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.Dropbox,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	ChunkSize   fs.SizeSuffix `config:"chunk_size"`
-	Impersonate string        `config:"impersonate"`
+	ChunkSize   fs.SizeSuffix        `config:"chunk_size"`
+	Impersonate string               `config:"impersonate"`
+	Enc         encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote dropbox server

@@ -381,7 +386,7 @@ func (f *Fs) setRoot(root string) {
 func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
 	err = f.pacer.Call(func() (bool, error) {
 		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
-			Path: enc.FromStandardPath(objPath),
+			Path: f.opt.Enc.FromStandardPath(objPath),
 		})
 		return shouldRetry(err)
 	})

@@ -475,7 +480,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	for {
 		if !started {
 			arg := files.ListFolderArg{
-				Path:      enc.FromStandardPath(root),
+				Path:      f.opt.Enc.FromStandardPath(root),
 				Recursive: false,
 			}
 			if root == "/" {

@@ -525,7 +530,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

 			// Only the last element is reliably cased in PathDisplay
 			entryPath := metadata.PathDisplay
-			leaf := enc.ToStandardName(path.Base(entryPath))
+			leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
 			remote := path.Join(dir, leaf)
 			if folderInfo != nil {
 				d := fs.NewDir(remote, time.Now())

@@ -583,7 +588,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

 	// create it
 	arg2 := files.CreateFolderArg{
-		Path: enc.FromStandardPath(root),
+		Path: f.opt.Enc.FromStandardPath(root),
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.CreateFolderV2(&arg2)

@@ -609,7 +614,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		return errors.Wrap(err, "Rmdir")
 	}

-	root = enc.FromStandardPath(root)
+	root = f.opt.Enc.FromStandardPath(root)
 	// check directory empty
 	arg := files.ListFolderArg{
 		Path: root,

@@ -668,8 +673,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	// Copy
 	arg := files.RelocationArg{
 		RelocationPath: files.RelocationPath{
-			FromPath: enc.FromStandardPath(srcObj.remotePath()),
-			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
+			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
+			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
 	var err error

@@ -704,7 +709,7 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
 	// Let dropbox delete the filesystem tree
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.DeleteV2(&files.DeleteArg{
-			Path: enc.FromStandardPath(f.slashRoot),
+			Path: f.opt.Enc.FromStandardPath(f.slashRoot),
 		})
 		return shouldRetry(err)
 	})

@@ -736,8 +741,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	// Do the move
 	arg := files.RelocationArg{
 		RelocationPath: files.RelocationPath{
-			FromPath: enc.FromStandardPath(srcObj.remotePath()),
-			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
+			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
+			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
 	var err error

@@ -764,7 +769,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
 func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
-	absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
+	absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
 	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
 	createArg := sharing.CreateSharedLinkWithSettingsArg{
 		Path: absPath,

@@ -840,8 +845,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// Do the move
 	arg := files.RelocationArg{
 		RelocationPath: files.RelocationPath{
-			FromPath: enc.FromStandardPath(srcPath),
-			ToPath:   enc.FromStandardPath(dstPath),
+			FromPath: f.opt.Enc.FromStandardPath(srcPath),
+			ToPath:   f.opt.Enc.FromStandardPath(dstPath),
 		},
 	}
 	err = f.pacer.Call(func() (bool, error) {

@@ -999,7 +1004,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
 	arg := files.DownloadArg{
-		Path:         enc.FromStandardPath(o.remotePath()),
+		Path:         o.fs.opt.Enc.FromStandardPath(o.remotePath()),
 		ExtraHeaders: headers,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {

@@ -1130,7 +1135,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if ignoredFiles.MatchString(remote) {
 		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
 	}
-	commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
+	commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
 	commitInfo.Mode.Tag = "overwrite"
 	// The Dropbox API only accepts timestamps in UTC with second precision.
 	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)

@@ -1156,7 +1161,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 func (o *Object) Remove(ctx context.Context) (err error) {
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
-			Path: enc.FromStandardPath(o.remotePath()),
+			Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
 		})
 		return shouldRetry(err)
 	})
backend/fichier/api.go

@@ -109,7 +109,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
 	}
 	for i := range filesList.Items {
 		item := &filesList.Items[i]
-		item.Filename = enc.ToStandardName(item.Filename)
+		item.Filename = f.opt.Enc.ToStandardName(item.Filename)
 	}

 	return filesList, nil

@@ -135,10 +135,10 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't list folders")
 	}
-	foldersList.Name = enc.ToStandardName(foldersList.Name)
+	foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
 	for i := range foldersList.SubFolders {
 		folder := &foldersList.SubFolders[i]
-		folder.Name = enc.ToStandardName(folder.Name)
+		folder.Name = f.opt.Enc.ToStandardName(folder.Name)
 	}

 	// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

@@ -213,7 +213,7 @@ func getRemote(dir, fileName string) string {
 }

 func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
-	name := enc.FromStandardName(leaf)
+	name := f.opt.Enc.FromStandardName(leaf)
 	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

 	request := MakeFolderRequest{

@@ -323,7 +323,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
 func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
 	// fs.Debugf(f, "Uploading File `%s`", fileName)

-	fileName = enc.FromStandardName(fileName)
+	fileName = f.opt.Enc.FromStandardName(fileName)

 	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
 		return nil, errors.New("Invalid UploadID")
backend/fichier/fichier.go

@@ -11,12 +11,14 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
 )

@@ -29,8 +31,6 @@ const (
 	decayConstant = 2 // bigger for slower decay, exponential
 )

-const enc = encodings.Fichier
-
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name: "fichier",

@@ -38,25 +38,28 @@ func init() {
 		Config: func(name string, config configmap.Mapper) {
 		},
 		NewFs: NewFs,
-		Options: []fs.Option{
-			{
-				Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
-				Name: "api_key",
-			},
-			{
-				Help:     "If you want to download a shared folder, add this parameter",
-				Name:     "shared_folder",
-				Required: false,
-				Advanced: true,
-			},
-		},
+		Options: []fs.Option{{
+			Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
+			Name: "api_key",
+		}, {
+			Help:     "If you want to download a shared folder, add this parameter",
+			Name:     "shared_folder",
+			Required: false,
+			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.Fichier,
+		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	APIKey       string `config:"api_key"`
-	SharedFolder string `config:"shared_folder"`
+	APIKey       string               `config:"api_key"`
+	SharedFolder string               `config:"shared_folder"`
+	Enc          encoder.MultiEncoder `config:"encoding"`
 }

 // Fs is the interface a cloud storage system must provide

@@ -64,9 +67,9 @@ type Fs struct {
 	root       string
 	name       string
 	features   *fs.Features
+	opt        Options
 	dirCache   *dircache.DirCache
 	baseClient *http.Client
-	options    *Options
 	pacer      *fs.Pacer
 	rest       *rest.Client
 }

@@ -162,7 +165,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
 	f := &Fs{
 		name:       name,
 		root:       root,
-		options:    opt,
+		opt:        *opt,
 		pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		baseClient: &http.Client{},
 	}

@@ -176,7 +179,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {

 	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

-	f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
+	f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)

 	f.dirCache = dircache.New(root, rootID, f)

@@ -226,8 +229,8 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	if f.options.SharedFolder != "" {
-		return f.listSharedFiles(ctx, f.options.SharedFolder)
+	if f.opt.SharedFolder != "" {
+		return f.listSharedFiles(ctx, f.opt.SharedFolder)
 	}

 	dirContent, err := f.listDir(ctx, dir)
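The 1Fichier backend needs one extra step beyond the other backends: it previously held its decoded config behind a pointer field with a non-standard name (options *Options), so the hunks above also rename it to a by-value opt Options field, matching the convention used everywhere else and making encoder access read uniformly as f.opt.Enc. A minimal sketch of the two shapes, with everything but the relevant field elided:

// Before: backend-specific pointer field.
type fsBefore struct {
    options *Options // call sites read f.options.APIKey
}

// After: conventional by-value field, as in the other backends.
type fsAfter struct {
    opt Options // call sites read f.opt.APIKey and f.opt.Enc
}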
backend/ftp/ftp.go

@@ -14,77 +14,81 @@ import (
 	"github.com/jlaffaye/ftp"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 )

-const enc = encodings.FTP
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "ftp",
 		Description: "FTP Connection",
 		NewFs:       NewFs,
-		Options: []fs.Option{
-			{
-				Name:     "host",
-				Help:     "FTP host to connect to",
-				Required: true,
-				Examples: []fs.OptionExample{{
-					Value: "ftp.example.com",
-					Help:  "Connect to ftp.example.com",
-				}},
-			}, {
-				Name: "user",
-				Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
-			}, {
-				Name: "port",
-				Help: "FTP port, leave blank to use default (21)",
-			}, {
-				Name:       "pass",
-				Help:       "FTP password",
-				IsPassword: true,
-				Required:   true,
-			}, {
-				Name:    "tls",
-				Help:    "Use FTP over TLS (Implicit)",
-				Default: false,
-			}, {
-				Name:     "concurrency",
-				Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
-				Default:  0,
-				Advanced: true,
-			}, {
-				Name:     "no_check_certificate",
-				Help:     "Do not verify the TLS certificate of the server",
-				Default:  false,
-				Advanced: true,
-			}, {
-				Name:     "disable_epsv",
-				Help:     "Disable using EPSV even if server advertises support",
-				Default:  false,
-				Advanced: true,
-			},
-		},
+		Options: []fs.Option{{
+			Name:     "host",
+			Help:     "FTP host to connect to",
+			Required: true,
+			Examples: []fs.OptionExample{{
+				Value: "ftp.example.com",
+				Help:  "Connect to ftp.example.com",
+			}},
+		}, {
+			Name: "user",
+			Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
+		}, {
+			Name: "port",
+			Help: "FTP port, leave blank to use default (21)",
+		}, {
+			Name:       "pass",
+			Help:       "FTP password",
+			IsPassword: true,
+			Required:   true,
+		}, {
+			Name:    "tls",
+			Help:    "Use FTP over TLS (Implicit)",
+			Default: false,
+		}, {
+			Name:     "concurrency",
+			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
+			Default:  0,
+			Advanced: true,
+		}, {
+			Name:     "no_check_certificate",
+			Help:     "Do not verify the TLS certificate of the server",
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name:     "disable_epsv",
+			Help:     "Disable using EPSV even if server advertises support",
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.FTP,
+		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	Host              string `config:"host"`
-	User              string `config:"user"`
-	Pass              string `config:"pass"`
-	Port              string `config:"port"`
-	TLS               bool   `config:"tls"`
-	Concurrency       int    `config:"concurrency"`
-	SkipVerifyTLSCert bool   `config:"no_check_certificate"`
-	DisableEPSV       bool   `config:"disable_epsv"`
+	Host              string               `config:"host"`
+	User              string               `config:"user"`
+	Pass              string               `config:"pass"`
+	Port              string               `config:"port"`
+	TLS               bool                 `config:"tls"`
+	Concurrency       int                  `config:"concurrency"`
+	SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
+	DisableEPSV       bool                 `config:"disable_epsv"`
+	Enc               encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote FTP server

@@ -308,22 +312,22 @@ func translateErrorDir(err error) error {
 }

 // entryToStandard converts an incoming ftp.Entry to Standard encoding
-func entryToStandard(entry *ftp.Entry) {
+func (f *Fs) entryToStandard(entry *ftp.Entry) {
 	// Skip . and .. as we don't want these encoded
 	if entry.Name == "." || entry.Name == ".." {
 		return
 	}
-	entry.Name = enc.ToStandardName(entry.Name)
-	entry.Target = enc.ToStandardPath(entry.Target)
+	entry.Name = f.opt.Enc.ToStandardName(entry.Name)
+	entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
 }

 // dirFromStandardPath returns dir in encoded form.
-func dirFromStandardPath(dir string) string {
+func (f *Fs) dirFromStandardPath(dir string) string {
 	// Skip . and .. as we don't want these encoded
 	if dir == "." || dir == ".." {
 		return dir
 	}
-	return enc.FromStandardPath(dir)
+	return f.opt.Enc.FromStandardPath(dir)
 }

 // findItem finds a directory entry for the name in its parent directory

@@ -345,13 +349,13 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
-	files, err := c.List(dirFromStandardPath(dir))
+	files, err := c.List(f.dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}
 	for _, file := range files {
-		entryToStandard(file)
+		f.entryToStandard(file)
 		if file.Name == base {
 			return file, nil
 		}

@@ -418,7 +422,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	resultchan := make(chan []*ftp.Entry, 1)
 	errchan := make(chan error, 1)
 	go func() {
-		result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
+		result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
 		f.putFtpConnection(&c, err)
 		if err != nil {
 			errchan <- err

@@ -455,7 +459,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	for i := range files {
 		object := files[i]
-		entryToStandard(object)
+		f.entryToStandard(object)
 		newremote := path.Join(dir, object.Name)
 		switch object.Type {
 		case ftp.EntryTypeFolder:

@@ -525,7 +529,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
-	files, err := c.List(dirFromStandardPath(dir))
+	files, err := c.List(f.dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)

@@ -533,7 +537,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {

 	for i := range files {
 		file := files[i]
-		entryToStandard(file)
+		f.entryToStandard(file)
 		if file.Name == base {
 			info := &FileInfo{
 				Name: remote,

@@ -571,7 +575,7 @@ func (f *Fs) mkdir(abspath string) error {
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
-	err = c.MakeDir(dirFromStandardPath(abspath))
+	err = c.MakeDir(f.dirFromStandardPath(abspath))
 	f.putFtpConnection(&c, err)
 	switch errX := err.(type) {
 	case *textproto.Error:

@@ -607,7 +611,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
-	err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
+	err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
 	f.putFtpConnection(&c, err)
 	return translateErrorDir(err)
 }

@@ -628,8 +632,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, errors.Wrap(err, "Move")
 	}
 	err = c.Rename(
-		enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
-		enc.FromStandardPath(path.Join(f.root, remote)),
+		f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
+		f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {

@@ -682,8 +686,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return errors.Wrap(err, "DirMove")
 	}
 	err = c.Rename(
-		dirFromStandardPath(srcPath),
-		dirFromStandardPath(dstPath),
+		f.dirFromStandardPath(srcPath),
+		f.dirFromStandardPath(dstPath),
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {

@@ -809,7 +813,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
 	}
-	fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
+	fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
 	if err != nil {
 		o.fs.putFtpConnection(&c, err)
 		return nil, errors.Wrap(err, "open")

@@ -844,7 +848,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
-	err = c.Stor(enc.FromStandardPath(path), in)
+	err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
 	if err != nil {
 		_ = c.Quit() // toss this connection to avoid sync errors
 		remove()

@@ -874,7 +878,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	if err != nil {
 		return errors.Wrap(err, "Remove")
 	}
-	err = c.Delete(enc.FromStandardPath(path))
+	err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
 	o.fs.putFtpConnection(&c, err)
 	}
 	return err
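The FTP backend shows the other recurring consequence of the change: helpers that used to read the package-level enc, such as entryToStandard and dirFromStandardPath, can no longer be free functions, because the encoder now lives on the remote's options. The hunks above move them onto *Fs; restated in isolation (ftp.Entry comes from github.com/jlaffaye/ftp, as in the source):

// entryToStandard becomes a method so it can reach f.opt.Enc; callers
// change from entryToStandard(e) to f.entryToStandard(e).
func (f *Fs) entryToStandard(entry *ftp.Entry) {
    // "." and ".." are protocol-level names and must never be re-encoded.
    if entry.Name == "." || entry.Name == ".." {
        return
    }
    entry.Name = f.opt.Enc.ToStandardName(entry.Name)
    entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
}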
backend/googlecloudstorage/googlecloudstorage.go

@@ -38,6 +38,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"

@@ -69,8 +70,6 @@ var (
 	}
 )

-const enc = encodings.GoogleCloudStorage
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{

@@ -248,20 +247,26 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 				Value: "DURABLE_REDUCED_AVAILABILITY",
 				Help:  "Durable reduced availability storage class",
 			}},
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.GoogleCloudStorage,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	ProjectNumber             string `config:"project_number"`
-	ServiceAccountFile        string `config:"service_account_file"`
-	ServiceAccountCredentials string `config:"service_account_credentials"`
-	ObjectACL                 string `config:"object_acl"`
-	BucketACL                 string `config:"bucket_acl"`
-	BucketPolicyOnly          bool   `config:"bucket_policy_only"`
-	Location                  string `config:"location"`
-	StorageClass              string `config:"storage_class"`
+	ProjectNumber             string               `config:"project_number"`
+	ServiceAccountFile        string               `config:"service_account_file"`
+	ServiceAccountCredentials string               `config:"service_account_credentials"`
+	ObjectACL                 string               `config:"object_acl"`
+	BucketACL                 string               `config:"bucket_acl"`
+	BucketPolicyOnly          bool                 `config:"bucket_policy_only"`
+	Location                  string               `config:"location"`
+	StorageClass              string               `config:"storage_class"`
+	Enc                       encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote storage server

@@ -353,7 +358,7 @@ func parsePath(path string) (root string) {
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
-	return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
+	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }

 // split returns bucket and bucketPath from the object

@@ -442,7 +447,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the object exists
-		encodedDirectory := enc.FromStandardPath(f.rootDirectory)
+		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
 			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
 			return shouldRetry(err)

@@ -527,7 +532,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			if !strings.HasSuffix(remote, "/") {
 				continue
 			}
-			remote = enc.ToStandardPath(remote)
+			remote = f.opt.Enc.ToStandardPath(remote)
 			if !strings.HasPrefix(remote, prefix) {
 				fs.Logf(f, "Odd name received %q", remote)
 				continue

@@ -543,7 +548,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		}
 	}
 	for _, object := range objects.Items {
-		remote := enc.ToStandardPath(object.Name)
+		remote := f.opt.Enc.ToStandardPath(object.Name)
 		if !strings.HasPrefix(remote, prefix) {
 			fs.Logf(f, "Odd name received %q", object.Name)
 			continue

@@ -620,7 +625,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 		return nil, err
 	}
 	for _, bucket := range buckets.Items {
-		d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
+		d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
 		entries = append(entries, d)
 	}
 	if buckets.NextPageToken == "" {
backend/jottacloud/jottacloud.go

@@ -31,14 +31,13 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
 )

-const enc = encodings.JottaCloud
-
 // Globals
 const (
 	minSleep = 10 * time.Millisecond

@@ -157,18 +156,24 @@ func init() {
 			Help:     "Files bigger than this can be resumed if the upload fail's.",
 			Default:  fs.SizeSuffix(10 * 1024 * 1024),
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.JottaCloud,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
-	Device             string        `config:"device"`
-	Mountpoint         string        `config:"mountpoint"`
-	MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
-	HardDelete         bool          `config:"hard_delete"`
-	Unlink             bool          `config:"unlink"`
-	UploadThreshold    fs.SizeSuffix `config:"upload_resume_limit"`
+	Device             string               `config:"device"`
+	Mountpoint         string               `config:"mountpoint"`
+	MD5MemoryThreshold fs.SizeSuffix        `config:"md5_memory_limit"`
+	HardDelete         bool                 `config:"hard_delete"`
+	Unlink             bool                 `config:"unlink"`
+	UploadThreshold    fs.SizeSuffix        `config:"upload_resume_limit"`
+	Enc                encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote jottacloud

@@ -446,7 +451,7 @@ func urlPathEscape(in string) string {

 // filePathRaw returns an unescaped file path (f.root, file)
 func (f *Fs) filePathRaw(file string) string {
-	return path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, file)))
+	return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
 }

 // filePath returns a escaped file path (f.root, file)

@@ -638,7 +643,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if item.Deleted {
 				continue
 			}
-			remote := path.Join(dir, enc.ToStandardName(item.Name))
+			remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
 			d := fs.NewDir(remote, time.Time(item.ModifiedAt))
 			entries = append(entries, d)
 		}

@@ -648,7 +653,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if item.Deleted || item.State != "COMPLETED" {
 				continue
 			}
-			remote := path.Join(dir, enc.ToStandardName(item.Name))
+			remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
 			o, err := f.newObjectWithInfo(ctx, remote, item)
 			if err != nil {
 				continue

@@ -673,7 +678,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
 	if folder.Deleted {
 		return nil
 	}
-	folderPath := enc.ToStandardPath(path.Join(folder.Path, folder.Name))
+	folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
 	folderPathLength := len(folderPath)
 	var remoteDir string
 	if folderPathLength > pathPrefixLength {

@@ -691,7 +696,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
 		if file.Deleted || file.State != "COMPLETED" {
 			continue
 		}
-		remoteFile := path.Join(remoteDir, enc.ToStandardName(file.Name))
+		remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
 		o, err := f.newObjectWithInfo(ctx, remoteFile, file)
 		if err != nil {
 			return err

@@ -862,7 +867,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 		Parameters: url.Values{},
 	}

-	opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, dest))))
+	opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))

 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {

@@ -969,7 +974,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return fs.ErrorDirExists
 	}

-	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, enc.FromStandardPath(srcPath))+"/", dstRemote)
+	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

 	if err != nil {
 		return errors.Wrap(err, "couldn't move directory")

@@ -1260,7 +1265,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Created:  fileDate,
 		Modified: fileDate,
 		Md5:      md5String,
-		Path:     path.Join(o.fs.opt.Mountpoint, enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
+		Path:     path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
 	}

 	// send it
@@ -12,65 +12,69 @@ import (
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/lib/encoder"

httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)

-const enc = encodings.Koofr
-
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
-Options: []fs.Option{
-{
+Options: []fs.Option{{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
-},
-},
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.Koofr,
+}},
})
}

// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// A Fs is a representation of a remote Koofr Fs
@@ -246,7 +250,7 @@ func (f *Fs) Hashes() hash.Set {

// fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string {
-return enc.FromStandardPath(path.Join("/", f.root, part))
+return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}

// NewFs constructs a new filesystem given a root path and configuration options
@@ -299,7 +303,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
-rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
+rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -317,7 +321,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
-remote := path.Join(dir, enc.ToStandardName(file.Name))
+remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else {
@@ -6,4 +6,4 @@ import (
"github.com/rclone/rclone/fs/encodings"
)

-const enc = encodings.LocalMacOS
+const defaultEnc = encodings.LocalMacOS
@@ -6,4 +6,4 @@ import (
"github.com/rclone/rclone/fs/encodings"
)

-const enc = encodings.LocalUnix
+const defaultEnc = encodings.LocalUnix
@@ -6,4 +6,4 @@ import (
"github.com/rclone/rclone/fs/encodings"
)

-const enc = encodings.LocalWindows
+const defaultEnc = encodings.LocalWindows
@@ -20,10 +20,12 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
+"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
)
@@ -115,6 +117,11 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: defaultEnc,
}},
}
fs.Register(fsi)
@@ -122,15 +129,16 @@ to override the default choice.`,

// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a local filesystem rooted at root
@@ -189,7 +197,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
dev: devUnset,
lstat: os.Lstat,
}
-f.root = cleanRootPath(root, f.opt.NoUNC)
+f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
@@ -234,7 +242,7 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
-return enc.ToStandardPath(filepath.ToSlash(f.root))
+return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root))
}

// String converts this Fs to a string
@@ -443,7 +451,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

func (f *Fs) cleanRemote(dir, filename string) (remote string) {
-remote = path.Join(dir, enc.ToStandardName(filename))
+remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))

if !utf8.ValidString(filename) {
f.warnedMu.Lock()
@@ -457,7 +465,7 @@ func (f *Fs) cleanRemote(dir, filename string) (remote string) {
}

func (f *Fs) localPath(name string) string {
-return filepath.Join(f.root, filepath.FromSlash(enc.FromStandardPath(name)))
+return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name)))
}

// Put the Object to the local filesystem
@@ -1092,7 +1100,7 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}

-func cleanRootPath(s string, noUNC bool) string {
+func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
@@ -64,7 +64,7 @@ func TestCleanWindows(t *testing.T) {
t.Skipf("windows only")
}
for _, test := range testsWindows {
-got := cleanRootPath(test[0], true)
+got := cleanRootPath(test[0], true, defaultEnc)
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
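With the global gone, the local backend's helpers take the encoder explicitly: cleanRootPath grows a third parameter, and the Windows test in the final hunk passes the build-tagged defaultEnc directly instead of constructing an Fs. A condensed sketch of the new call shape — both calls are lifted from the hunks above; only the test input value is illustrative:

// NewFs threads the configured encoder into path cleaning.
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)

// The test needs no Fs at all - the per-OS default encoder is enough.
got := cleanRootPath(`c:\temp`, true, defaultEnc)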
@@ -24,6 +24,7 @@ import (
"github.com/rclone/rclone/backend/mailru/mrhash"

"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -34,6 +35,7 @@ import (
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"

+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
@@ -42,8 +44,6 @@ import (
"golang.org/x/oauth2"
)

-const enc = encodings.Mailru
-
// Global constants
const (
minSleepPacer = 10 * time.Millisecond
@@ -193,21 +193,27 @@ facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist gzip insecure retry400`,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.Mailru,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
Username string `config:"user"`
Password string `config:"pass"`
UserAgent string `config:"user_agent"`
CheckHash bool `config:"check_hash"`
SpeedupEnable bool `config:"speedup_enable"`
SpeedupPatterns string `config:"speedup_file_patterns"`
SpeedupMaxDisk fs.SizeSuffix `config:"speedup_max_disk"`
SpeedupMaxMem fs.SizeSuffix `config:"speedup_max_memory"`
Quirks string `config:"quirks"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// retryErrorCodes is a slice of error codes that we will retry
@@ -607,7 +613,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
Path: "/api/m1/file",
Parameters: url.Values{
"access_token": {token},
-"home": {enc.FromStandardPath(path)},
+"home": {f.opt.Enc.FromStandardPath(path)},
"offset": {"0"},
"limit": {strconv.Itoa(maxInt32)},
},
@@ -642,7 +648,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
// =0 - for an empty directory
// >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
-remote, err := f.relPath(enc.ToStandardPath(item.Home))
+remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
if err != nil {
return nil, -1, err
}
@@ -708,7 +714,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
params.Set("limit", strconv.Itoa(limit))

data := url.Values{}
-data.Set("home", enc.FromStandardPath(dirPath))
+data.Set("home", f.opt.Enc.FromStandardPath(dirPath))

opts := rest.Opts{
Method: "POST",
@@ -756,7 +762,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs

req := api.NewBinWriter()
req.WritePu16(api.OperationFolderList)
-req.WriteString(enc.FromStandardPath(dirPath))
+req.WriteString(f.opt.Enc.FromStandardPath(dirPath))
req.WritePu32(int64(depth))
req.WritePu32(int64(options))
req.WritePu32(0)
@@ -892,7 +898,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
if (head & 4096) != 0 {
t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength)
}
-name := enc.FromStandardPath(string(r.ReadBytesByLength()))
+name := t.f.opt.Enc.FromStandardPath(string(r.ReadBytesByLength()))
t.dunno1 = int(r.ReadULong())
t.dunno2 = 0
t.dunno3 = 0
@@ -1031,7 +1037,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
req := api.NewBinWriter()
req.WritePu16(api.OperationCreateFolder)
req.WritePu16(0) // revision
-req.WriteString(enc.FromStandardPath(path))
+req.WriteString(f.opt.Enc.FromStandardPath(path))
req.WritePu32(0)

token, err := f.accessToken()
@@ -1186,7 +1192,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
return err
}

-data := url.Values{"home": {enc.FromStandardPath(path)}}
+data := url.Values{"home": {f.opt.Enc.FromStandardPath(path)}}
opts := rest.Opts{
Method: "POST",
Path: "/api/m1/file/remove",
@@ -1243,8 +1249,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

data := url.Values{}
-data.Set("home", enc.FromStandardPath(srcPath))
-data.Set("folder", enc.FromStandardPath(parentDir(dstPath)))
+data.Set("home", f.opt.Enc.FromStandardPath(srcPath))
+data.Set("folder", f.opt.Enc.FromStandardPath(parentDir(dstPath)))
data.Set("email", f.opt.Username)
data.Set("x-email", f.opt.Username)

@@ -1282,7 +1288,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fmt.Errorf("copy failed with code %d", response.Status)
}

-tmpPath := enc.ToStandardPath(response.Body)
+tmpPath := f.opt.Enc.ToStandardPath(response.Body)
if tmpPath != dstPath {
// fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath)
err = f.moveItemBin(ctx, tmpPath, dstPath, "rename temporary file")
@@ -1357,9 +1363,9 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
req := api.NewBinWriter()
req.WritePu16(api.OperationRename)
req.WritePu32(0) // old revision
-req.WriteString(enc.FromStandardPath(srcPath))
+req.WriteString(f.opt.Enc.FromStandardPath(srcPath))
req.WritePu32(0) // new revision
-req.WriteString(enc.FromStandardPath(dstPath))
+req.WriteString(f.opt.Enc.FromStandardPath(dstPath))
req.WritePu32(0) // dunno

opts := rest.Opts{
@@ -1450,7 +1456,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
}

data := url.Values{}
-data.Set("home", enc.FromStandardPath(f.absPath(remote)))
+data.Set("home", f.opt.Enc.FromStandardPath(f.absPath(remote)))
data.Set("email", f.opt.Username)
data.Set("x-email", f.opt.Username)

@@ -2015,7 +2021,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req := api.NewBinWriter()
req.WritePu16(api.OperationAddFile)
req.WritePu16(0) // revision
-req.WriteString(enc.FromStandardPath(o.absPath()))
+req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WritePu64(o.modTime.Unix())
req.WritePu32(0)
@@ -2113,7 +2119,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts := rest.Opts{
Method: "GET",
Options: options,
-Path: url.PathEscape(strings.TrimLeft(enc.FromStandardPath(o.absPath()), "/")),
+Path: url.PathEscape(strings.TrimLeft(o.fs.opt.Enc.FromStandardPath(o.absPath()), "/")),
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"token": {token},
@@ -26,19 +26,19 @@ import (

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
mega "github.com/t3rm1n4l/go-mega"
)

-const enc = encodings.Mega
-
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
@@ -83,16 +83,22 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.Mega,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote mega
@@ -250,12 +256,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend specific encoding
-func splitNodePath(nodePath string) (parts []string) {
+func (f *Fs) splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
if nodePath == "." || nodePath == "/" {
return nil
}
-nodePath = enc.FromStandardPath(nodePath)
+nodePath = f.opt.Enc.FromStandardPath(nodePath)
return strings.Split(nodePath, "/")
}

@@ -263,7 +269,7 @@ func splitNodePath(nodePath string) (parts []string) {
//
// It returns mega.ENOENT if it wasn't found
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
-parts := splitNodePath(nodePath)
+parts := f.splitNodePath(nodePath)
if parts == nil {
return rootNode, nil
}
@@ -320,7 +326,7 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()

-parts := splitNodePath(dir)
+parts := f.splitNodePath(dir)
if parts == nil {
return rootNode, nil
}
@@ -422,7 +428,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
errors := 0
// similar to f.deleteNode(trash) but with HardDelete as true
for _, item := range items {
-fs.Debugf(f, "Deleting trash %q", enc.ToStandardName(item.GetName()))
+fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(err)
@@ -504,7 +510,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
var iErr error
_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
-remote := path.Join(dir, enc.ToStandardName(info.GetName()))
+remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName()))
switch info.GetType() {
case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
@@ -726,7 +732,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
-err = f.srv.Rename(info, enc.FromStandardName(dstLeaf))
+err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
return shouldRetry(err)
})
if err != nil {
@@ -875,13 +881,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// move them into place
for _, info := range infos {
-fs.Infof(srcDir, "merging %q", enc.ToStandardName(info.GetName()))
+fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
-return errors.Wrapf(err, "MergeDirs move failed on %q in %v", enc.ToStandardName(info.GetName()), srcDir)
+return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
}
}
// rmdir (into trash) the now empty source directory
@@ -1124,7 +1130,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
-u, err = o.fs.srv.NewUpload(dirNode, enc.FromStandardName(leaf), size)
+u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
return shouldRetry(err)
})
if err != nil {
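The mega hunks show the one structural consequence of dropping the global: splitNodePath could read enc as a free function, but the configured encoder now lives on the Fs, so the function becomes a method and both call sites switch to f.splitNodePath(...). The resulting shape, assembled from the hunks above into one piece for readability:

// After the change, the receiver is the only route to the per-remote
// encoder, so encoding happens inside the method instead of via a global.
func (f *Fs) splitNodePath(nodePath string) (parts []string) {
	nodePath = path.Clean(nodePath)
	if nodePath == "." || nodePath == "/" {
		return nil // refers to the root
	}
	nodePath = f.opt.Enc.FromStandardPath(nodePath)
	return strings.Split(nodePath, "/")
}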
@@ -28,6 +28,7 @@ import (
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
@@ -35,8 +36,6 @@ import (
"golang.org/x/oauth2"
)

-const enc = encodings.OneDrive
-
const (
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
@@ -252,16 +251,22 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.OneDrive,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote one drive
@@ -355,7 +360,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
if relPath != "" {
-relPath = "/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath)))
+relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
err = f.pacer.Call(func() (bool, error) {
@@ -380,7 +385,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
} else {
opts = rest.Opts{
Method: "GET",
-Path: "/root:/" + rest.URLPathEscape(enc.FromStandardPath(path)),
+Path: "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
@@ -628,7 +633,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
var info *api.Item
opts := newOptsCall(dirID, "POST", "/children")
mkdir := api.CreateItemRequest{
-Name: enc.FromStandardName(leaf),
+Name: f.opt.Enc.FromStandardName(leaf),
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
@@ -688,7 +693,7 @@ OUTER:
if item.Deleted != nil {
continue
}
-item.Name = enc.ToStandardName(item.GetName())
+item.Name = f.opt.Enc.ToStandardName(item.GetName())
if fn(item) {
found = true
break OUTER
@@ -944,7 +949,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

id, dstDriveID, _ := parseNormalizedID(directoryID)

-replacedLeaf := enc.FromStandardName(leaf)
+replacedLeaf := f.opt.Enc.FromStandardName(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
@@ -1028,7 +1033,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts := newOptsCall(srcObj.id, "PATCH", "")

move := api.MoveItemRequest{
-Name: enc.FromStandardName(leaf),
+Name: f.opt.Enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: id,
@@ -1143,7 +1148,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
-Name: enc.FromStandardName(leaf),
+Name: f.opt.Enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: parsedDstDirID,
@@ -1265,7 +1270,7 @@ func (o *Object) rootPath() string {

// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
-return enc.FromStandardPath(f.rootSlash() + remote)
+return f.opt.Enc.FromStandardPath(f.rootSlash() + remote)
}

// srvPath returns a path for use in server
@@ -1377,7 +1382,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
-Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardName(leaf))),
+Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
@@ -1452,7 +1457,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
Method: "POST",
RootURL: rootURL,
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
-drive, id, rest.URLPathEscape(enc.FromStandardName(leaf))),
+drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
@@ -1604,7 +1609,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
-Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(enc.FromStandardName(leaf)) + ":/content",
+Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
}
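The OneDrive hunks also illustrate the direction convention used consistently across the commit: FromStandard* is applied at the last moment before a name enters a request path or body, and ToStandard* at the first moment a name comes back from the API, so the rest of rclone only ever handles standard names. Both fragments below are taken from the hunks above (they are struct-field and loop-body excerpts, not a complete function):

// Outbound: encode, then URL-escape, then build the request path.
Path: "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),

// Inbound: decode immediately while walking a listing.
item.Name = f.opt.Enc.ToStandardName(item.GetName())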
@@ -13,6 +13,7 @@ import (

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -21,13 +22,12 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)

-const enc = encodings.OpenDrive
-
const (
defaultEndpoint = "https://dev.opendrive.com/api/v1"
minSleep = 10 * time.Millisecond
@@ -50,14 +50,20 @@ func init() {
Help: "Password.",
IsPassword: true,
Required: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.OpenDrive,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
UserName string `config:"username"`
Password string `config:"password"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote server
@@ -588,7 +594,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
fs: f,
remote: remote,
}
-return o, enc.FromStandardName(leaf), directoryID, nil
+return o, f.opt.Enc.FromStandardName(leaf), directoryID, nil
}

// readMetaDataForPath reads the metadata from the path
@@ -690,7 +696,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
createDirData := createFolder{
SessionID: f.session.SessionID,
-FolderName: enc.FromStandardName(leaf),
+FolderName: f.opt.Enc.FromStandardName(leaf),
FolderSubParent: pathID,
FolderIsPublic: 0,
FolderPublicUpl: 0,
@@ -736,7 +742,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return "", false, errors.Wrap(err, "failed to get folder list")
}

-leaf = enc.FromStandardName(leaf)
+leaf = f.opt.Enc.FromStandardName(leaf)
for _, folder := range folderList.Folders {
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)

@@ -784,7 +790,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

for _, folder := range folderList.Folders {
-folder.Name = enc.ToStandardName(folder.Name)
+folder.Name = f.opt.Enc.ToStandardName(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
@@ -795,7 +801,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

for _, file := range folderList.Files {
-file.Name = enc.ToStandardName(file.Name)
+file.Name = f.opt.Enc.ToStandardName(file.Name)
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(ctx, remote, &file)
@@ -1050,7 +1056,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
-o.fs.session.SessionID, directoryID, url.QueryEscape(enc.FromStandardName(leaf))),
+o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)
@@ -30,14 +30,13 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)

-const enc = encodings.Pcloud
-
const (
rcloneClientID = "DnONSzyJXpm"
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
@@ -81,12 +80,18 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.Pcloud,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote pcloud
@@ -342,7 +347,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/createfolder",
Parameters: url.Values{},
}
-opts.Parameters.Set("name", enc.FromStandardName(leaf))
+opts.Parameters.Set("name", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
@@ -418,7 +423,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
continue
}
}
-item.Name = enc.ToStandardName(item.Name)
+item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
@@ -610,7 +615,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
-opts.Parameters.Set("toname", enc.FromStandardName(leaf))
+opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
@@ -689,7 +694,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
-opts.Parameters.Set("toname", enc.FromStandardName(leaf))
+opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
@@ -786,7 +791,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Parameters: url.Values{},
}
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
-opts.Parameters.Set("toname", enc.FromStandardName(leaf))
+opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
@@ -1066,7 +1071,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
}
-leaf = enc.FromStandardName(leaf)
+leaf = o.fs.opt.Enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
@@ -31,6 +31,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/premiumizeme/api"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -39,6 +40,7 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
@@ -46,8 +48,6 @@ import (
"golang.org/x/oauth2"
)

-const enc = encodings.PremiumizeMe
-
const (
rcloneClientID = "658922194"
rcloneEncryptedClientSecret = "B5YIvQoRIhcpAYs8HYeyjb9gK-ftmZEbqdh_gNfc4RgO9Q"
@@ -93,13 +93,19 @@ This is not normally used - use oauth instead.
`,
Hide: fs.OptionHideBoth,
Default: "",
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.PremiumizeMe,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote cloud storage system
@@ -364,7 +370,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/folder/create",
Parameters: f.baseParams(),
MultipartParams: url.Values{
-"name": {enc.FromStandardName(leaf)},
+"name": {f.opt.Enc.FromStandardName(leaf)},
"parent_id": {pathID},
},
}
@@ -429,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
-item.Name = enc.ToStandardName(item.Name)
+item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
@@ -637,8 +643,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// between directories and a separate one to rename them. We try to
// call the minimum number of API calls.
func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (err error) {
-newLeaf = enc.FromStandardName(newLeaf)
-oldLeaf = enc.FromStandardName(oldLeaf)
+newLeaf = f.opt.Enc.FromStandardName(newLeaf)
+oldLeaf = f.opt.Enc.FromStandardName(oldLeaf)
doRenameLeaf := oldLeaf != newLeaf
doMove := oldDirectoryID != newDirectoryID

@@ -891,7 +897,7 @@ func (o *Object) Remote() string {

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
-return enc.FromStandardPath(o.fs.rootSlash() + o.remote)
+return o.fs.opt.Enc.FromStandardPath(o.fs.rootSlash() + o.remote)
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
@@ -1006,7 +1012,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
-leaf = enc.FromStandardName(leaf)
+leaf = o.fs.opt.Enc.FromStandardName(leaf)

var resp *http.Response
var info api.FolderUploadinfoResponse
@@ -17,6 +17,7 @@ import (
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
+"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -29,6 +30,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
features *fs.Features // optional features
+opt Options // options for this Fs
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
@@ -60,6 +62,12 @@ func (f *Fs) Features() *fs.Features {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
+// Parse config into Options struct
+opt := new(Options)
+err = configstruct.Set(m, opt)
+if err != nil {
+return nil, err
+}
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
@@ -67,6 +75,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
p := &Fs{
name: name,
root: root,
+opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
oAuthClient: oAuthClient,
@@ -127,7 +136,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var entry putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
-entry, err = f.client.Files.CreateFolder(ctx, enc.FromStandardName(leaf), parentID)
+entry, err = f.client.Files.CreateFolder(ctx, f.opt.Enc.FromStandardName(leaf), parentID)
return shouldRetry(err)
})
return itoa(entry.ID), err
@@ -154,7 +163,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return
}
for _, child := range children {
-if enc.ToStandardName(child.Name) == leaf {
+if f.opt.Enc.ToStandardName(child.Name) == leaf {
found = true
pathIDOut = itoa(child.ID)
if !child.IsDir() {
@@ -196,7 +205,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return
}
for _, child := range children {
-remote := path.Join(dir, enc.ToStandardName(child.Name))
+remote := path.Join(dir, f.opt.Enc.ToStandardName(child.Name))
// fs.Debugf(f, "child: %s", remote)
if child.IsDir() {
f.dirCache.Put(remote, itoa(child.ID))
@@ -274,7 +283,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
-b64name := base64.StdEncoding.EncodeToString([]byte(enc.FromStandardName(name)))
+b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name)))
b64true := base64.StdEncoding.EncodeToString([]byte("true"))
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
@@ -546,7 +555,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
-params.Set("name", enc.FromStandardName(leaf))
+params.Set("name", f.opt.Enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -585,7 +594,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
-params.Set("name", enc.FromStandardName(leaf))
+params.Set("name", f.opt.Enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -674,7 +683,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
params := url.Values{}
params.Set("file_id", srcID)
params.Set("parent_id", dstDirectoryID)
-params.Set("name", enc.FromStandardName(leaf))
+params.Set("name", f.opt.Enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
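put.io is the one backend in this commit that previously parsed no options at all, so its hunks add the whole chain rather than extending it: a configstruct.Set call in NewFs, an opt field on Fs, and (in putio.go further below) the Options type itself. Condensed from the hunks above, with the surrounding OAuth setup elided:

// NewFs: parse the config map into Options and store it on the Fs.
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
	return nil, err
}
p := &Fs{
	name: name,
	root: root,
	opt:  *opt, // f.opt.Enc is now reachable from every method
}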
@@ -137,7 +137,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
}
err = o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
-req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(enc.FromStandardName(leaf)), nil)
+req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf)), nil)
if err != nil {
return false, err
}
@@ -6,10 +6,12 @@ import (
"time"

"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
@@ -25,7 +27,6 @@ canReadUnnormalized = true
canReadRenormalized = true
canStream = false
*/
-const enc = encodings.Putio

// Constants
const (
@@ -65,9 +66,20 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
+Options: []fs.Option{{
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.Putio,
+}},
})
}

+// Options defines the configuration for this backend
+type Options struct {
+Enc encoder.MultiEncoder `config:"encoding"`
+}
+
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -18,6 +18,7 @@ import (

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
@@ -25,13 +26,12 @@ import (
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
+"github.com/rclone/rclone/lib/encoder"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

-const enc = encodings.QingStor
-
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -113,6 +113,11 @@ and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+Default: encodings.QingStor,
}},
})
}
@@ -136,15 +141,16 @@ func timestampToTime(tp int64) time.Time {

// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote qingstor server
@@ -188,7 +194,7 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
-return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
+return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
@@ -357,7 +363,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
-encodedDirectory := enc.FromStandardPath(f.rootDirectory)
+encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
_, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
if err == nil {
newRoot := path.Dir(f.root)
@@ -555,7 +561,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix
-remote = enc.ToStandardPath(remote)
+remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -576,7 +582,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck

for _, object := range resp.Keys {
remote := qs.StringValue(object.Key)
-remote = enc.ToStandardPath(remote)
+remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -653,7 +659,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
}

for _, bucket := range resp.Buckets {
-d := fs.NewDir(enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
+d := fs.NewDir(f.opt.Enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil
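For the bucket-based backends (QingStor here, S3 next), the encoder is applied once in split, so the bucket name and the object path are encoded separately and every caller receives both already converted. The function below is taken from the hunk above; the usage comment is an assumption about the callers, which the hunks do not show:

func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// Presumed call shape: listing and object code obtain both halves
// pre-encoded, e.g. bucket, directory := f.split(dir)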
@@ -46,6 +46,7 @@ import (
 	"github.com/ncw/swift"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/encodings"
@@ -54,14 +55,13 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/sync/errgroup"
 )
 
-const enc = encodings.S3
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -811,6 +811,11 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
 `,
 			Default:  1000,
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.S3,
 		}},
 	})
 }
@@ -830,29 +835,30 @@ const (
 
 // Options defines the configuration for this backend
 type Options struct {
-	Provider              string        `config:"provider"`
-	EnvAuth               bool          `config:"env_auth"`
-	AccessKeyID           string        `config:"access_key_id"`
-	SecretAccessKey       string        `config:"secret_access_key"`
-	Region                string        `config:"region"`
-	Endpoint              string        `config:"endpoint"`
-	LocationConstraint    string        `config:"location_constraint"`
-	ACL                   string        `config:"acl"`
-	BucketACL             string        `config:"bucket_acl"`
-	ServerSideEncryption  string        `config:"server_side_encryption"`
-	SSEKMSKeyID           string        `config:"sse_kms_key_id"`
-	StorageClass          string        `config:"storage_class"`
-	UploadCutoff          fs.SizeSuffix `config:"upload_cutoff"`
-	CopyCutoff            fs.SizeSuffix `config:"copy_cutoff"`
-	ChunkSize             fs.SizeSuffix `config:"chunk_size"`
-	DisableChecksum       bool          `config:"disable_checksum"`
-	SessionToken          string        `config:"session_token"`
-	UploadConcurrency     int           `config:"upload_concurrency"`
-	ForcePathStyle        bool          `config:"force_path_style"`
-	V2Auth                bool          `config:"v2_auth"`
-	UseAccelerateEndpoint bool          `config:"use_accelerate_endpoint"`
-	LeavePartsOnError     bool          `config:"leave_parts_on_error"`
-	ListChunk             int64         `config:"list_chunk"`
+	Provider              string               `config:"provider"`
+	EnvAuth               bool                 `config:"env_auth"`
+	AccessKeyID           string               `config:"access_key_id"`
+	SecretAccessKey       string               `config:"secret_access_key"`
+	Region                string               `config:"region"`
+	Endpoint              string               `config:"endpoint"`
+	LocationConstraint    string               `config:"location_constraint"`
+	ACL                   string               `config:"acl"`
+	BucketACL             string               `config:"bucket_acl"`
+	ServerSideEncryption  string               `config:"server_side_encryption"`
+	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
+	StorageClass          string               `config:"storage_class"`
+	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
+	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
+	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
+	DisableChecksum       bool                 `config:"disable_checksum"`
+	SessionToken          string               `config:"session_token"`
+	UploadConcurrency     int                  `config:"upload_concurrency"`
+	ForcePathStyle        bool                 `config:"force_path_style"`
+	V2Auth                bool                 `config:"v2_auth"`
+	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
+	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
+	ListChunk             int64                `config:"list_chunk"`
+	Enc                   encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote s3 server
@@ -965,7 +971,7 @@ func parsePath(path string) (root string) {
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
-	return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
+	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }
 
 // split returns bucket and bucketPath from the object
@@ -1166,7 +1172,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}).Fill(f)
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the object exists
-		encodedDirectory := enc.FromStandardPath(f.rootDirectory)
+		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 		req := s3.HeadObjectInput{
 			Bucket: &f.rootBucket,
 			Key:    &encodedDirectory,
@@ -1369,7 +1375,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 					continue
 				}
 			}
-			remote = enc.ToStandardPath(remote)
+			remote = f.opt.Enc.ToStandardPath(remote)
 			if !strings.HasPrefix(remote, prefix) {
 				fs.Logf(f, "Odd name received %q", remote)
 				continue
@@ -1396,7 +1402,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 				continue
 			}
 		}
-		remote = enc.ToStandardPath(remote)
+		remote = f.opt.Enc.ToStandardPath(remote)
 		if !strings.HasPrefix(remote, prefix) {
 			fs.Logf(f, "Odd name received %q", remote)
 			continue
@@ -1487,7 +1493,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 		return nil, err
 	}
 	for _, bucket := range resp.Buckets {
-		bucketName := enc.ToStandardName(aws.StringValue(bucket.Name))
+		bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name))
 		f.cache.MarkOK(bucketName)
 		d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
 		entries = append(entries, d)
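
The `split` helper above is where the two encoder entry points diverge: bucket names are encoded as a single segment, while object paths are encoded segment by segment so that `/` separators survive. A short sketch of that distinction, again assuming the `encoder.Encode*` constants from `lib/encoder` (the mask is illustrative, not the S3 default):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	enc := encoder.MultiEncoder(encoder.EncodeSlash | encoder.EncodeInvalidUtf8)

	// FromStandardPath encodes each "/"-separated segment on its own, so
	// the separator comes through untouched...
	fmt.Println(enc.FromStandardPath("a/b")) // still two path segments

	// ...while FromStandardName treats the whole string as one segment and
	// encodes the "/" itself, which is what bucket names require.
	fmt.Println(enc.FromStandardName("a/b")) // one encoded segment
}
```
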
@@ -87,6 +87,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/sharefile/api"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
@@ -94,6 +95,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"
@@ -101,8 +103,6 @@ import (
 	"golang.org/x/oauth2"
 )
 
-const enc = encodings.Sharefile
-
 const (
 	rcloneClientID              = "djQUPlHTUM9EvayYBWuKC5IrVIoQde46"
 	rcloneEncryptedClientSecret = "v7572bKhUindQL3yDnUAebmgP-QxiwT38JLxVPolcZBl6SSs329MtFzH73x7BeELmMVZtneUPvALSopUZ6VkhQ"
@@ -204,16 +204,22 @@ be set manually to something like: https://XXX.sharefile.com
 `,
 			Advanced: true,
 			Default:  "",
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.Sharefile,
 		}},
 	})
 }
 
 // Options defines the configuration for this backend
 type Options struct {
-	RootFolderID string        `config:"root_folder_id"`
-	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
-	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
-	Endpoint     string        `config:"endpoint"`
+	RootFolderID string               `config:"root_folder_id"`
+	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`
+	ChunkSize    fs.SizeSuffix        `config:"chunk_size"`
+	Endpoint     string               `config:"endpoint"`
+	Enc          encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote cloud storage system
@@ -301,7 +307,7 @@ func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directo
 	}
 	if path != "" {
 		opts.Path += "/ByPath"
-		opts.Parameters.Set("path", "/"+enc.FromStandardPath(path))
+		opts.Parameters.Set("path", "/"+f.opt.Enc.FromStandardPath(path))
 	}
 	var item api.Item
 	var resp *http.Response
@@ -595,7 +601,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 // CreateDir makes a directory with pathID as parent and name leaf
 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 	var resp *http.Response
-	leaf = enc.FromStandardName(leaf)
+	leaf = f.opt.Enc.FromStandardName(leaf)
 	var req = api.Item{
 		Name:     leaf,
 		FileName: leaf,
@@ -664,7 +670,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
 			continue
 		}
-		item.Name = enc.ToStandardName(item.Name)
+		item.Name = f.opt.Enc.ToStandardName(item.Name)
 		if fn(item) {
 			found = true
 			break
@@ -873,7 +879,7 @@ func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTi
 			"overwrite": {"false"},
 		},
 	}
-	leaf = enc.FromStandardName(leaf)
+	leaf = f.opt.Enc.FromStandardName(leaf)
 	// FIXME this appears to be a bug in the API
 	//
 	// If you set the modified time via PATCH then the server
@@ -1119,7 +1125,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 	if err != nil {
 		return nil, err
 	}
-	srcLeaf = enc.FromStandardName(srcLeaf)
+	srcLeaf = f.opt.Enc.FromStandardName(srcLeaf)
 	_ = srcParentID
 
 	// Create temporary object
@@ -1127,7 +1133,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 	if err != nil {
 		return nil, err
 	}
-	dstLeaf = enc.FromStandardName(dstLeaf)
+	dstLeaf = f.opt.Enc.FromStandardName(dstLeaf)
 
 	sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
 	if sameName && srcParentID == dstParentID {
@@ -1390,7 +1396,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return err
 	}
-	leaf = enc.FromStandardName(leaf)
+	leaf = o.fs.opt.Enc.FromStandardName(leaf)
 	var req = api.UploadRequest{
 		Method: "standard",
 		Raw:    true,
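
The `Object.Update` hunk above is the first place the encoder is reached as `o.fs.opt.Enc` rather than `f.opt.Enc`: the encoder now lives in the per-remote options on `Fs`, and objects get to it through their back-pointer instead of a shared package constant. A simplified model of that structure (types and field names mirror the rclone pattern but are illustrative, not the real definitions):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

// Options holds the parsed backend configuration, including the encoder.
type Options struct {
	Enc encoder.MultiEncoder
}

// Fs owns the options for one configured remote.
type Fs struct {
	opt Options
}

// Object keeps a back-pointer to the Fs it belongs to.
type Object struct {
	fs   *Fs
	leaf string
}

// encodedLeaf shows why object methods read o.fs.opt.Enc.
func (o *Object) encodedLeaf() string {
	return o.fs.opt.Enc.FromStandardName(o.leaf)
}

func main() {
	f := &Fs{opt: Options{Enc: encoder.MultiEncoder(encoder.EncodeCtl | encoder.EncodeInvalidUtf8)}}
	o := &Object{fs: f, leaf: "report\x01.txt"}
	fmt.Println(o.encodedLeaf()) // control character encoded for the remote
}
```
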
@@ -16,6 +16,7 @@ import (
 	"github.com/ncw/swift"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/encodings"
@@ -25,6 +26,7 @@ import (
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 )
@@ -60,10 +62,13 @@ Rclone will still chunk files bigger than chunk_size when doing normal
 copy operations.`,
 	Default:  false,
 	Advanced: true,
+}, {
+	Name:     config.ConfigEncoding,
+	Help:     config.ConfigEncodingHelp,
+	Advanced: true,
+	Default:  encodings.Swift,
 }}
 
-const enc = encodings.Swift
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -187,26 +192,27 @@ provider.`,
 
 // Options defines the configuration for this backend
 type Options struct {
-	EnvAuth                     bool          `config:"env_auth"`
-	User                        string        `config:"user"`
-	Key                         string        `config:"key"`
-	Auth                        string        `config:"auth"`
-	UserID                      string        `config:"user_id"`
-	Domain                      string        `config:"domain"`
-	Tenant                      string        `config:"tenant"`
-	TenantID                    string        `config:"tenant_id"`
-	TenantDomain                string        `config:"tenant_domain"`
-	Region                      string        `config:"region"`
-	StorageURL                  string        `config:"storage_url"`
-	AuthToken                   string        `config:"auth_token"`
-	AuthVersion                 int           `config:"auth_version"`
-	ApplicationCredentialID     string        `config:"application_credential_id"`
-	ApplicationCredentialName   string        `config:"application_credential_name"`
-	ApplicationCredentialSecret string        `config:"application_credential_secret"`
-	StoragePolicy               string        `config:"storage_policy"`
-	EndpointType                string        `config:"endpoint_type"`
-	ChunkSize                   fs.SizeSuffix `config:"chunk_size"`
-	NoChunk                     bool          `config:"no_chunk"`
+	EnvAuth                     bool                 `config:"env_auth"`
+	User                        string               `config:"user"`
+	Key                         string               `config:"key"`
+	Auth                        string               `config:"auth"`
+	UserID                      string               `config:"user_id"`
+	Domain                      string               `config:"domain"`
+	Tenant                      string               `config:"tenant"`
+	TenantID                    string               `config:"tenant_id"`
+	TenantDomain                string               `config:"tenant_domain"`
+	Region                      string               `config:"region"`
+	StorageURL                  string               `config:"storage_url"`
+	AuthToken                   string               `config:"auth_token"`
+	AuthVersion                 int                  `config:"auth_version"`
+	ApplicationCredentialID     string               `config:"application_credential_id"`
+	ApplicationCredentialName   string               `config:"application_credential_name"`
+	ApplicationCredentialSecret string               `config:"application_credential_secret"`
+	StoragePolicy               string               `config:"storage_policy"`
+	EndpointType                string               `config:"endpoint_type"`
+	ChunkSize                   fs.SizeSuffix        `config:"chunk_size"`
+	NoChunk                     bool                 `config:"no_chunk"`
+	Enc                         encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote swift server
@@ -325,7 +331,7 @@ func parsePath(path string) (root string) {
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
 	container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
-	return enc.FromStandardName(container), enc.FromStandardPath(containerPath)
+	return f.opt.Enc.FromStandardName(container), f.opt.Enc.FromStandardPath(containerPath)
 }
 
 // split returns container and containerPath from the object
@@ -446,7 +452,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 		// Check to see if the object exists - ignoring directory markers
 		var info swift.Object
 		var err error
-		encodedDirectory := enc.FromStandardPath(f.rootDirectory)
+		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
 			info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
@@ -559,7 +565,7 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 			if !recurse {
 				isDirectory = strings.HasSuffix(object.Name, "/")
 			}
-			remote := enc.ToStandardPath(object.Name)
+			remote := f.opt.Enc.ToStandardPath(object.Name)
 			if !strings.HasPrefix(remote, prefix) {
 				fs.Logf(f, "Odd name received %q", remote)
 				continue
@@ -642,7 +648,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 	}
 	for _, container := range containers {
 		f.cache.MarkOK(container.Name)
-		d := fs.NewDir(enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
+		d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
		entries = append(entries, d)
 	}
 	return entries, nil

@@ -23,6 +23,7 @@ import (
 	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
@@ -30,8 +31,6 @@ import (
 	"golang.org/x/oauth2"
 )
 
-const enc = encodings.Yandex
-
 //oAuth
 const (
 	rcloneClientID = "ac39b43b9eba4cae8ffb788c06d816a8"
@@ -80,14 +79,20 @@ func init() {
 			Help:     "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default:  encodings.Yandex,
 		}},
 	})
 }
 
 // Options defines the configuration for this backend
 type Options struct {
-	Token  string `config:"token"`
-	Unlink bool   `config:"unlink"`
+	Token  string               `config:"token"`
+	Unlink bool                 `config:"unlink"`
+	Enc    encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote yandex
@@ -210,7 +215,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
 		Parameters: url.Values{},
 	}
 
-	opts.Parameters.Set("path", enc.FromStandardPath(path))
+	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(path))
 
 	if options.SortMode != nil {
 		opts.Parameters.Set("sort", options.SortMode.String())
@@ -237,7 +242,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
 		return nil, err
 	}
 
-	info.Name = enc.ToStandardName(info.Name)
+	info.Name = f.opt.Enc.ToStandardName(info.Name)
 	return &info, nil
 }
 
@@ -364,7 +369,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if info.ResourceType == "dir" {
 		//list all subdirs
 		for _, element := range info.Embedded.Items {
-			element.Name = enc.ToStandardName(element.Name)
+			element.Name = f.opt.Enc.ToStandardName(element.Name)
 			remote := path.Join(dir, element.Name)
 			entry, err := f.itemToDirEntry(ctx, remote, &element)
 			if err != nil {
@@ -467,7 +472,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
 	if strings.IndexRune(path, ':') >= 0 {
 		path = "disk:" + path
 	}
-	opts.Parameters.Set("path", enc.FromStandardPath(path))
+	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(path))
 
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
@@ -581,7 +586,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
 		Parameters: url.Values{},
 	}
 
-	opts.Parameters.Set("path", enc.FromStandardPath(path))
+	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(path))
 	opts.Parameters.Set("permanently", strconv.FormatBool(hardDelete))
 
 	var resp *http.Response
@@ -653,8 +658,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
 		Parameters: url.Values{},
 	}
 
-	opts.Parameters.Set("from", enc.FromStandardPath(src))
-	opts.Parameters.Set("path", enc.FromStandardPath(dst))
+	opts.Parameters.Set("from", f.opt.Enc.FromStandardPath(src))
+	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(dst))
 	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))
 
 	var resp *http.Response
@@ -803,12 +808,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 	}
 	opts := rest.Opts{
 		Method:     "PUT",
-		Path:       enc.FromStandardPath(path),
+		Path:       f.opt.Enc.FromStandardPath(path),
 		Parameters: url.Values{},
 		NoResponse: true,
 	}
 
-	opts.Parameters.Set("path", enc.FromStandardPath(f.filePath(remote)))
+	opts.Parameters.Set("path", f.opt.Enc.FromStandardPath(f.filePath(remote)))
 
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
@@ -994,7 +999,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
 		NoResponse: true,
 	}
 
-	opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
+	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
 	rcm := map[string]interface{}{
 		property: value,
 	}
@@ -1031,7 +1036,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Parameters: url.Values{},
 	}
 
-	opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
+	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
 
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
@@ -1068,7 +1073,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
 		Parameters: url.Values{},
 	}
 
-	opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
+	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
 	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))
 
 	err = o.fs.pacer.Call(func() (bool, error) {
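
Every yandex hunk applies the same in/out rule: encode just before a value leaves for the API (the `path` and `from` query parameters), decode as soon as a value comes back (`info.Name`, `element.Name`), so the rest of rclone only ever handles standard-encoded strings. A toy model of that boundary, with illustrative function names and mask (not from the commit):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

// toAPI encodes an outgoing path, as the query-parameter hunks above do.
func toAPI(enc encoder.MultiEncoder, standardPath string) string {
	return enc.FromStandardPath(standardPath)
}

// fromAPI decodes an incoming listing name, as the info.Name hunks do.
func fromAPI(enc encoder.MultiEncoder, wireName string) string {
	return enc.ToStandardName(wireName)
}

func main() {
	enc := encoder.MultiEncoder(encoder.EncodeCtl | encoder.EncodeInvalidUtf8)
	wire := toAPI(enc, "docs/a\x01b") // control char encoded, "/" kept
	fmt.Println(wire, fromAPI(enc, "a\x01b")) // decode restores the name
}
```
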
@@ -60,6 +60,12 @@ const (
 	// ConfigTokenURL is the config key used to store the token server endpoint
 	ConfigTokenURL = "token_url"
 
+	// ConfigEncoding is the config key to change the encoding for a backend
+	ConfigEncoding = "encoding"
+
+	// ConfigEncodingHelp is the help for ConfigEncoding
+	ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info."
+
 	// ConfigAuthorize indicates that we just want "rclone authorize"
 	ConfigAuthorize = "config_authorize"
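
These two shared constants are what each backend hunk above plugs into its option list, so the key and help text stay identical everywhere. Condensed into a template, the per-backend registration looks roughly like this (the backend name and the default mask are placeholders, not values from the commit; each real backend passes its old hard-coded table, e.g. `encodings.S3`, as the default):

```go
package main

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/encoder"
)

func init() {
	// Sketch of the registration pattern repeated across this commit.
	fs.Register(&fs.RegInfo{
		Name: "mybackend", // placeholder backend name
		Options: []fs.Option{{
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Placeholder default; real backends use their encodings.* table.
			Default: encoder.MultiEncoder(encoder.EncodeCtl | encoder.EncodeInvalidUtf8),
		}},
	})
}

func main() {}
```
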
@@ -1,4 +1,4 @@
-package config
+package config_test
 
 import (
 	"context"
@@ -6,6 +6,7 @@ import (
 
 	_ "github.com/rclone/rclone/backend/local"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/stretchr/testify/assert"
@@ -28,8 +29,8 @@ func TestRc(t *testing.T) {
 	out, err := call.Fn(context.Background(), in)
 	require.NoError(t, err)
 	require.Nil(t, out)
-	assert.Equal(t, "local", FileGet(testName, "type"))
-	assert.Equal(t, "sausage", FileGet(testName, "test_key"))
+	assert.Equal(t, "local", config.FileGet(testName, "type"))
+	assert.Equal(t, "sausage", config.FileGet(testName, "test_key"))
 
 	// The sub tests rely on the remote created above but they can
 	// all be run independently
@@ -92,9 +93,9 @@ func TestRc(t *testing.T) {
 		require.NoError(t, err)
 		assert.Nil(t, out)
 
-		assert.Equal(t, "local", FileGet(testName, "type"))
-		assert.Equal(t, "rutabaga", FileGet(testName, "test_key"))
-		assert.Equal(t, "cabbage", FileGet(testName, "test_key2"))
+		assert.Equal(t, "local", config.FileGet(testName, "type"))
+		assert.Equal(t, "rutabaga", config.FileGet(testName, "test_key"))
+		assert.Equal(t, "cabbage", config.FileGet(testName, "test_key2"))
 	})
 
 	t.Run("Password", func(t *testing.T) {
@@ -111,9 +112,9 @@ func TestRc(t *testing.T) {
 		require.NoError(t, err)
 		assert.Nil(t, out)
 
-		assert.Equal(t, "local", FileGet(testName, "type"))
-		assert.Equal(t, "rutabaga", obscure.MustReveal(FileGet(testName, "test_key")))
-		assert.Equal(t, "cabbage", obscure.MustReveal(FileGet(testName, "test_key2")))
+		assert.Equal(t, "local", config.FileGet(testName, "type"))
+		assert.Equal(t, "rutabaga", obscure.MustReveal(config.FileGet(testName, "test_key")))
+		assert.Equal(t, "cabbage", obscure.MustReveal(config.FileGet(testName, "test_key2")))
 	})
 
 	// Delete the test remote
@@ -125,8 +126,8 @@ func TestRc(t *testing.T) {
 	out, err = call.Fn(context.Background(), in)
 	require.NoError(t, err)
 	assert.Nil(t, out)
-	assert.Equal(t, "", FileGet(testName, "type"))
-	assert.Equal(t, "", FileGet(testName, "test_key"))
+	assert.Equal(t, "", config.FileGet(testName, "type"))
+	assert.Equal(t, "", config.FileGet(testName, "test_key"))
 }
 
 func TestRcProviders(t *testing.T) {
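
Moving this test file into the external `config_test` package means only exported identifiers of `config` remain visible, which is why every `FileGet` call above gains a `config.` qualifier. A minimal illustration of the convention (the test body is illustrative, not part of the commit):

```go
package config_test

import (
	"testing"

	"github.com/rclone/rclone/fs/config"
)

// An external test package exercises config strictly through its exported
// API; unexported helpers are no longer reachable from here.
func TestFileGetMissing(t *testing.T) {
	// FileGet returns "" when the remote or key does not exist, which is
	// exactly what the deletion checks in TestRc rely on.
	if got := config.FileGet("no-such-remote", "type"); got != "" {
		t.Errorf("want empty string, got %q", got)
	}
}
```
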