Mirror of https://github.com/rclone/rclone.git (synced 2024-11-25 17:57:44 +08:00)

commit 4aee962233 (parent 5f71d186b2)

doc: fix typos throughout docs and code
@@ -158,7 +158,7 @@ with modules beneath.
 * fserrors - rclone specific error handling
 * fshttp - http handling for rclone
 * fspath - path handling for rclone
-* hash - defines rclones hash types and functions
+* hash - defines rclone's hash types and functions
 * list - list a remote
 * log - logging facilities
 * march - iterates directories in lock step
@@ -295,7 +295,7 @@ If you need to update a dependency then run
     GO111MODULE=on go get -u github.com/pkg/errors
     GO111MODULE=on go mod vendor

-Check in in a single commit as above.
+Check in a single commit as above.

 ## Updating all the dependencies ##

@@ -169,7 +169,7 @@ type Fs struct {
 	tokenRenewer *oauthutil.Renew // renew the token on expiry
 }

-// Object describes a acd object
+// Object describes an acd object
 //
 // Will definitely have info but maybe not meta
 type Object struct {
@@ -229,7 +229,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 	}
 	// Work around receiving this error sporadically on authentication
 	//
-	// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
+	// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
 	if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
 		fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
 		return true, err
@@ -201,7 +201,7 @@ type Fs struct {
 	pool *pool.Pool // memory pool
 }

-// Object describes a azure object
+// Object describes an azure object
 type Object struct {
 	fs *Fs // what this object is part of
 	remote string // The remote path
@@ -338,7 +338,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 }

 // httpClientFactory creates a Factory object that sends HTTP requests
-// to a rclone's http.Client.
+// to an rclone's http.Client.
 //
 // copied from azblob.newDefaultHTTPClientFactory
 func httpClientFactory(client *http.Client) pipeline.Factory {
@@ -296,7 +296,7 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// parsePath parses an box 'url'
+// parsePath parses a box 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
 	return
@@ -217,7 +217,7 @@ func decodeFileName(in string) ([]byte, error) {
 // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
 // Rogaway.
 //
-// This makes for determinstic encryption which is what we want - the
+// This makes for deterministic encryption which is what we want - the
 // same filename must encrypt to the same thing.
 //
 // This means that
@@ -929,7 +929,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 		assert.Equal(t, 0, n)
 	}

-	// Now try decoding it with a open/seek
+	// Now try decoding it with an open/seek
 	for _, offset := range trials {
 		for _, limit := range limits {
 			if offset+limit > len(plaintext) {
@@ -241,7 +241,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 	*entries = append(*entries, f.newObject(obj))
 }

-// Encrypt an directory file name to entries.
+// Encrypt a directory file name to entries.
 func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
@@ -943,7 +943,7 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
 		// Prefer direct interface assertion
 	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
-		// Otherwise likely is a operations.OverrideRemote
+		// Otherwise likely is an operations.OverrideRemote
 		srcObj = do.UnWrap()
 	} else {
 		return "", nil
@@ -82,7 +82,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {

 	var oi fs.ObjectInfo = obj
 	if wrap {
-		// wrap the object in a fs.ObjectUnwrapper if required
+		// wrap the object in an fs.ObjectUnwrapper if required
 		oi = testWrapper{oi}
 	}

@@ -1220,7 +1220,7 @@ func (f *Fs) getFileFields() (fields googleapi.Field) {
 	return fields
 }

-// newRegularObject creates a fs.Object for a normal drive.File
+// newRegularObject creates an fs.Object for a normal drive.File
 func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
 	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
 	if f.opt.SkipChecksumGphotos {
@@ -1239,7 +1239,7 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
 		}
 	}

-// newDocumentObject creates a fs.Object for a google docs drive.File
+// newDocumentObject creates an fs.Object for a google docs drive.File
 func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
 	mediaType, _, err := mime.ParseMediaType(exportMimeType)
 	if err != nil {
@@ -1270,7 +1270,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
 	}, nil
 }

-// newLinkObject creates a fs.Object that represents a link a google docs drive.File
+// newLinkObject creates an fs.Object that represents a link a google docs drive.File
 func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
 	t := linkTemplate(exportMimeType)
 	if t == nil {
@@ -1296,9 +1296,9 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
 	}, nil
 }

-// newObjectWithInfo creates a fs.Object for any drive.File
+// newObjectWithInfo creates an fs.Object for any drive.File
 //
-// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
+// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
 	// If item has MD5 sum or a length it is a file stored on drive
 	if info.Md5Checksum != "" || info.Size > 0 {
@@ -1309,9 +1309,9 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro
 	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
 }

-// newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat
+// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
 //
-// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
+// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
 	remote string, info *drive.File,
 	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
@@ -1629,7 +1629,7 @@ func (s listRSlices) Less(i, j int) bool {
 	return s.dirs[i] < s.dirs[j]
 }

-// listRRunner will read dirIDs from the in channel, perform the file listing an call cb with each DirEntry.
+// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
 //
 // In each cycle it will read up to grouping entries from the in channel without blocking.
 // If an error occurs it will be send to the out channel and then return. Once the in channel is closed,
@@ -1788,7 +1788,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	for len(overflow) > 0 {
 		mu.Lock()
 		l := len(overflow)
-		// only fill half of the channel to prevent entries beeing put into overflow again
+		// only fill half of the channel to prevent entries being put into overflow again
 		if l > inputBuffer/2 {
 			l = inputBuffer / 2
 		}
@@ -1922,8 +1922,8 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error)
 	return newItem, nil
 }

-// itemToDirEntry converts a drive.File to a fs.DirEntry.
-// When the drive.File cannot be represented as a fs.DirEntry
+// itemToDirEntry converts a drive.File to an fs.DirEntry.
+// When the drive.File cannot be represented as an fs.DirEntry
 // (nil, nil) is returned.
 func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
 	switch {
@@ -3144,7 +3144,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
 	return req, res, nil
 }

-// openDocumentFile represents an documentObject open for reading.
+// openDocumentFile represents a documentObject open for reading.
 // Updates the object size after read successfully.
 type openDocumentFile struct {
 	o *documentObject // Object we are reading for
@@ -72,7 +72,7 @@ func init() {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,
 	Advanced: true,
-	// The FTP protocal can't handle trailing spaces (for instance
+	// The FTP protocol can't handle trailing spaces (for instance
 	// pureftpd turns them into _)
 	//
 	// proftpd can't handle '*' in file names
@@ -17,7 +17,7 @@ type Error struct {
 	Details ErrorDetails `json:"error"`
 }

-// Error statisfies error interface
+// Error satisfies error interface
 func (e *Error) Error() string {
 	return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
 }
@@ -224,7 +224,7 @@ func (ds dirPatterns) mustCompile() dirPatterns {
 	return ds
 }

-// match finds the path passed in in the matching structure and
+// match finds the path passed in the matching structure and
 // returns the parameters and a pointer to the match, or nil.
 func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
 	itemPath = strings.Trim(itemPath, "/")
@@ -21,7 +21,7 @@ func newAuth(f *Fs) *auth {
 	}
 }

-// Request constructs a http.Request for authentication
+// Request constructs an http.Request for authentication
 //
 // returns nil for not needed
 func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
@@ -235,7 +235,7 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// parsePath parses an box 'url'
+// parsePath parses a box 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
 	return
@@ -454,7 +454,7 @@ func errorHandler(resp *http.Response) error {
 	return errResponse
 }

-// Jottacloud want's '+' to be URL encoded even though the RFC states it's not reserved
+// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
 func urlPathEscape(in string) string {
 	return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
 }
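For context on the helper in the hunk above: plain path-escaping leaves '+' unescaped, so Jottacloud would decode a literal '+' as a space, and the strings.Replace forces it to '%2B'. A minimal standalone sketch of the same idea, substituting the standard library's net/url for rclone's internal rest package (that substitution is an assumption for illustration only):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// urlPathEscape path-escapes a path segment, then forces '+' to '%2B',
// mirroring the behaviour the comment above describes.
func urlPathEscape(in string) string {
	return strings.Replace(url.PathEscape(in), "+", "%2B", -1)
}

func main() {
	fmt.Println(url.PathEscape("a+b c.mp3")) // a+b%20c.mp3 - '+' survives plain escaping
	fmt.Println(urlPathEscape("a+b c.mp3"))  // a%2Bb%20c.mp3
}
```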
@@ -464,7 +464,7 @@ func (f *Fs) filePathRaw(file string) string {
 	return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
 }

-// filePath returns a escaped file path (f.root, file)
+// filePath returns an escaped file path (f.root, file)
 func (f *Fs) filePath(file string) string {
 	return urlPathEscape(f.filePathRaw(file))
 }
@@ -493,7 +493,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.New("Outdated config - please reconfigure this backend")
 	}

-	// if custome endpoints are set use them else stick with defaults
+	// if custom endpoints are set use them else stick with defaults
 	if tokenURL, ok := m.Get(configTokenURL); ok {
 		oauthConfig.Endpoint.TokenURL = tokenURL
 		// jottacloud is weird. we need to use the tokenURL as authURL
@@ -1105,7 +1105,7 @@ func (o *Object) Remote() string {
 	return o.remote
 }

-// filePath returns a escaped file path (f.root, remote)
+// filePath returns an escaped file path (f.root, remote)
 func (o *Object) filePath() string {
 	return o.fs.filePath(o.remote)
 }
@@ -421,7 +421,7 @@ func translateErrorsObject(err error) error {
 }

 // mkdir creates a directory at the given remote path. Creates ancestors if
-// neccessary
+// necessary
 func (f *Fs) mkdir(fullPath string) error {
 	if fullPath == "/" {
 		return nil
@@ -402,7 +402,7 @@ func (q *quirks) parseQuirks(option string) {
 		// "Accept-Encoding: gzip" header. However, enabling compression
 		// might be good for performance.
 		// Use this quirk to investigate the performance impact.
-		// Remove this quirk if perfomance does not improve.
+		// Remove this quirk if performance does not improve.
 		q.gzip = true
 	case "insecure":
 		// The mailru disk-o protocol is not documented. To compare HTTP
@@ -150,7 +150,7 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// parsePath parses an mega 'url'
+// parsePath parses a mega 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
 	return
@@ -272,19 +272,19 @@ type CreateShareLinkResponse struct {
 	} `json:"link"`
 }

-// AsyncOperationStatus provides information on the status of a asynchronous job progress.
+// AsyncOperationStatus provides information on the status of an asynchronous job progress.
 //
 // The following API calls return AsyncOperationStatus resources:
 //
 // Copy Item
 // Upload From URL
 type AsyncOperationStatus struct {
-	PercentageComplete float64 `json:"percentageComplete"` // An float value between 0 and 100 that indicates the percentage complete.
+	PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
 	Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
 }

 // GetID returns a normalized ID of the item
-// If DriveID is known it will be prefixed to the ID with # seperator
+// If DriveID is known it will be prefixed to the ID with # separator
 // Can be parsed using onedrive.parseNormalizedID(normalizedID)
 func (i *Item) GetID() string {
 	if i.IsRemote() && i.RemoteItem.ID != "" {
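The GetID comment above says normalized IDs take the form "driveID#itemID" and can be parsed back with onedrive.parseNormalizedID. A hypothetical sketch of that inverse operation, illustrative only and not rclone's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// parseNormalizedID splits a normalized "driveID#itemID" back into its
// parts; IDs without '#' carry no drive component. This is a sketch of
// the inverse of GetID, not rclone's real code.
func parseNormalizedID(id string) (itemID, driveID string, hasDrive bool) {
	if i := strings.IndexByte(id, '#'); i >= 0 {
		return id[i+1:], id[:i], true
	}
	return id, "", false
}

func main() {
	fmt.Println(parseNormalizedID("b!xyz#01ABC")) // itemID "01ABC", driveID "b!xyz"
	fmt.Println(parseNormalizedID("01ABC"))       // itemID only, no driveID
}
```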
@@ -396,7 +396,7 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// parsePath parses an one drive 'url'
+// parsePath parses a one drive 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
 	return
@@ -1310,7 +1310,7 @@ func (f *Fs) Hashes() hash.Set {
 	return hash.Set(QuickXorHashType)
 }

-// PublicLink returns a link for downloading without accout.
+// PublicLink returns a link for downloading without account.
 func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
 	info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
 	if err != nil {
@@ -677,7 +677,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	}

 	if "" == o.id {
-		// We need to create a ID for this file
+		// We need to create an ID for this file
 		var resp *http.Response
 		response := createFileResponse{}
 		err := o.fs.pacer.Call(func() (bool, error) {
@@ -18,13 +18,13 @@ func (e *Error) Error() string {
 	return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
 }

-// Account describes a OpenDRIVE account
+// Account describes an OpenDRIVE account
 type Account struct {
 	Username string `json:"username"`
 	Password string `json:"passwd"`
 }

-// UserSessionInfo describes a OpenDRIVE session
+// UserSessionInfo describes an OpenDRIVE session
 type UserSessionInfo struct {
 	Username string `json:"username"`
 	Password string `json:"passwd"`
@@ -45,7 +45,7 @@ type UserSessionInfo struct {
 	PartnerUsersDomain string `json:"PartnerUsersDomain"`
 }

-// FolderList describes a OpenDRIVE listing
+// FolderList describes an OpenDRIVE listing
 type FolderList struct {
 	// DirUpdateTime string `json:"DirUpdateTime,string"`
 	Name string `json:"Name"`
@@ -56,7 +56,7 @@ type FolderList struct {
 	Files []File `json:"Files"`
 }

-// Folder describes a OpenDRIVE folder
+// Folder describes an OpenDRIVE folder
 type Folder struct {
 	FolderID string `json:"FolderID"`
 	Name string `json:"Name"`
@@ -109,7 +109,7 @@ type removeFolder struct {
 	FolderID string `json:"folder_id"`
 }

-// File describes a OpenDRIVE file
+// File describes an OpenDRIVE file
 type File struct {
 	FileID string `json:"FileId"`
 	FileHash string `json:"FileHash"`
@@ -152,7 +152,7 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// parsePath parses an pcloud 'url'
+// parsePath parses a pcloud 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
 	return
@@ -10,7 +10,7 @@ type Response struct {
 	Status string `json:"status"`
 }

-// Error statisfies the error interface
+// Error satisfies the error interface
 func (e *Response) Error() string {
 	return fmt.Sprintf("%s: %s", e.Status, e.Message)
 }
@@ -203,7 +203,7 @@ func (o *Object) split() (bucket, bucketPath string) {
 	return o.fs.split(o.remote)
 }

-// Split an URL into three parts: protocol host and port
+// Split a URL into three parts: protocol host and port
 func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 	/*
 		Pattern to match an endpoint,
@@ -22,7 +22,7 @@ const (
 	// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
 	// maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
 	minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
-	maxMultiParts = 10000 // The maximum allowed number of parts in an multi-part upload
+	maxMultiParts = 10000 // The maximum allowed number of parts in a multi-part upload
 )

 const (
@@ -168,7 +168,7 @@ func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
 	return err
 }

-// Upload upload a object into QingStor
+// Upload upload an object into QingStor
 func (u *uploader) upload() error {
 	u.init()

@@ -297,7 +297,7 @@ func (mu *multiUploader) send(c chunk) error {
 	return err
 }

-// complete complete an multipart upload
+// complete complete a multipart upload
 func (mu *multiUploader) complete() error {
 	var err error
 	if err = mu.getErr(); err != nil {
@@ -324,7 +324,7 @@ func (mu *multiUploader) complete() error {
 	return err
 }

-// abort abort an multipart upload
+// abort abort a multipart upload
 func (mu *multiUploader) abort() error {
 	var err error
 	bucketInit, _ := mu.bucketInit()
@@ -342,7 +342,7 @@ func (mu *multiUploader) abort() error {

 // multiPartUpload upload a multiple object into QingStor
 func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
-	// Initiate an multi-part upload
+	// Initiate a multi-part upload
 	if err = mu.initiate(); err != nil {
 		return err
 	}
@@ -677,7 +677,7 @@ isn't set then "acl" is used instead.`,
 	}},
 }, {
 	Name: "sse_customer_key",
-	Help: "If using SSE-C you must provide the secret encyption key used to encrypt/decrypt your data.",
+	Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
 	Provider: "AWS,Ceph,Minio",
 	Advanced: true,
 	Examples: []fs.OptionExample{{
@@ -212,7 +212,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	fs.Debugf(nil, "Seafile server version %s", serverInfo.Version)

-	// We don't support bellow seafile v6.0 (version 6.0 is already more than 3 years old)
+	// We don't support lower than seafile v6.0 (version 6.0 is already more than 3 years old)
 	serverVersion := semver.New(serverInfo.Version)
 	if serverVersion.Major < 6 {
 		return nil, errors.New("unsupported Seafile server (version < 6.0)")
@@ -1058,7 +1058,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
 	// No luck with JSON input with the older api2
 	postParameters := url.Values{
 		"operation": {"rename"},
-		"reloaddir": {"true"}, // This is an undocumented trick to avoid a http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
+		"reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
 		"newname": {f.opt.Enc.FromStandardName(newname)},
 	}

@@ -485,7 +485,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
 }

-// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
+// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
 // the host specified in the ssh.ClientConfig
 func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
 	f := &Fs{
@@ -1036,7 +1036,7 @@ func parseHash(bytes []byte) string {

 // Parses the byte array output from the SSH session
 // returned by an invocation of df into
-// the disk size, used space, and avaliable space on the disk, in that order.
+// the disk size, used space, and available space on the disk, in that order.
 // Only works when `df` has output info on only one disk
 func parseUsage(bytes []byte) (spaceTotal int64, spaceUsed int64, spaceAvail int64) {
 	spaceTotal, spaceUsed, spaceAvail = -1, -1, -1
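The comment fixed above documents the contract of parseUsage: a header plus exactly one disk row of `df` output, returning -1 on failure. A sketch of that documented behaviour, assuming `df -k` style output in 1K blocks; this is illustrative and not rclone's actual implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUsage returns total, used and available space in bytes from
// `df -k` style output with a header and exactly one disk row, or -1
// for any value that cannot be parsed. A sketch of the documented
// contract, not rclone's real code.
func parseUsage(out []byte) (total, used, avail int64) {
	total, used, avail = -1, -1, -1
	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
	if len(lines) != 2 { // header + exactly one disk
		return
	}
	fields := strings.Fields(lines[1])
	if len(fields) < 4 {
		return
	}
	parse := func(s string) int64 {
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return -1
		}
		return n * 1024 // df -k reports 1K blocks
	}
	return parse(fields[1]), parse(fields[2]), parse(fields[3])
}

func main() {
	out := []byte("Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/sda1 1000 300 700 30% /")
	fmt.Println(parseUsage(out)) // 1024000 307200 716800
}
```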
@@ -102,7 +102,7 @@ type UploadSpecification struct {
 	MaxNumberOfThreads int `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads
 }

-// UploadFinishResponse is returnes from calling UploadSpecification.FinishURI
+// UploadFinishResponse is returns from calling UploadSpecification.FinishURI
 type UploadFinishResponse struct {
 	Error bool `json:"error"`
 	ErrorMessage string `json:"errorMessage"`
@@ -284,7 +284,7 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this err deserves to be
 // retried. It returns the err as a convenience
 func shouldRetry(err error) (bool, error) {
-	// If this is an swift.Error object extract the HTTP error code
+	// If this is a swift.Error object extract the HTTP error code
 	if swiftError, ok := err.(*swift.Error); ok {
 		for _, e := range retryErrorCodes {
 			if swiftError.StatusCode == e {
@@ -1253,7 +1253,7 @@ func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
 		fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
 		e := o.fs.c.ObjectDelete(segmentsContainer, v)
 		if e != nil {
-			fs.Errorf(o, "Error occured in delete segment file %q on %q , error: %q", v, segmentsContainer, e)
+			fs.Errorf(o, "Error occurred in delete segment file %q on %q , error: %q", v, segmentsContainer, e)
 		}
 	}
 }
@@ -669,7 +669,7 @@ func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
 // requirements. In particular, libuplink requires a trailing slash for
 // listings, but rclone does not always provide one. Further, depending on how
 // the path was initially path normalization may have removed it (e.g. a
-// trailing slash from the CLI is removed before it ever get's to the backend
+// trailing slash from the CLI is removed before it ever gets to the backend
 // code).
 func newPrefix(prefix string) string {
 	if prefix == "" {
@@ -33,7 +33,7 @@ func (e Errors) FilterNil() Errors {
 	return ne
 }

-// Err returns a error interface that filtered nil,
+// Err returns an error interface that filtered nil,
 // or nil if no non-nil Error is presented.
 func (e Errors) Err() error {
 	ne := e.FilterNil()
@@ -31,7 +31,7 @@ func (p *All) Create(ctx context.Context, upstreams []*upstream.Fs, path string)
 	return upstreams, nil
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *All) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.epall(ctx, upstreams, path)
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receivng a set of candidate entries
 func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -86,7 +86,7 @@ func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return upstreams, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -61,7 +61,7 @@ func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string
 	return []*upstream.Fs{u}, err
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpFF) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -86,7 +86,7 @@ func (p *EpFF) Create(ctx context.Context, upstreams []*upstream.Fs, path string
 	return []*upstream.Fs{u}, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpFF) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -106,7 +106,7 @@ func (p *EpFF) Search(ctx context.Context, upstreams []*upstream.Fs, path string
 	return p.epff(ctx, upstreams, path)
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpFF) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -65,7 +65,7 @@ func (p *EpLfs) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpLfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.ActionEntries(entries...)
 	if err != nil {
@@ -85,7 +85,7 @@ func (p *EpLfs) Create(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpLfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.CreateEntries(entries...)
 	if err != nil {
@@ -107,7 +107,7 @@ func (p *EpLfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.lfs(upstreams)
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpLfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -65,7 +65,7 @@ func (p *EpLno) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpLno) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.ActionEntries(entries...)
 	if err != nil {
@@ -85,7 +85,7 @@ func (p *EpLno) Create(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpLno) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.CreateEntries(entries...)
 	if err != nil {
@@ -107,7 +107,7 @@ func (p *EpLno) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.lno(upstreams)
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpLno) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -65,7 +65,7 @@ func (p *EpLus) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpLus) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.ActionEntries(entries...)
 	if err != nil {
@@ -85,7 +85,7 @@ func (p *EpLus) Create(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpLus) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.CreateEntries(entries...)
 	if err != nil {
@@ -107,7 +107,7 @@ func (p *EpLus) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.lus(upstreams)
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpLus) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -64,7 +64,7 @@ func (p *EpMfs) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpMfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.ActionEntries(entries...)
 	if err != nil {
@@ -84,7 +84,7 @@ func (p *EpMfs) Create(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return []*upstream.Fs{u}, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpMfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.CreateEntries(entries...)
 	if err != nil {
@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.mfs(upstreams)
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receivng a set of candidate entries
 func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -38,7 +38,7 @@ func (p *EpRand) Action(ctx context.Context, upstreams []*upstream.Fs, path stri
 	return []*upstream.Fs{p.rand(upstreams)}, nil
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpRand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.ActionEntries(entries...)
 	if err != nil {
@@ -56,7 +56,7 @@ func (p *EpRand) Create(ctx context.Context, upstreams []*upstream.Fs, path stri
 	return []*upstream.Fs{p.rand(upstreams)}, nil
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *EpRand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.EpAll.CreateEntries(entries...)
 	if err != nil {
@@ -77,7 +77,7 @@ func (p *EpRand) Search(ctx context.Context, upstreams []*upstream.Fs, path stri
 	return p.rand(upstreams), nil
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpRand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -15,7 +15,7 @@ func init() {
 }

 // Newest policy picks the file / directory with the largest mtime
-// It implies the existance of a path
+// It implies the existence of a path
 type Newest struct {
 	EpAll
 }
@@ -93,7 +93,7 @@ func (p *Newest) Action(ctx context.Context, upstreams []*upstream.Fs, path stri
 	return []*upstream.Fs{u}, err
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *Newest) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -119,7 +119,7 @@ func (p *Newest) Create(ctx context.Context, upstreams []*upstream.Fs, path stri
 	return []*upstream.Fs{u}, err
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *Newest) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -140,7 +140,7 @@ func (p *Newest) Search(ctx context.Context, upstreams []*upstream.Fs, path stri
 	return p.newest(ctx, upstreams, path)
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *Newest) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -26,13 +26,13 @@ type Policy interface {
 	// Search category policy, governing the access to files and directories
 	Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error)

-	// ActionEntries is ACTION category policy but receving a set of candidate entries
+	// ActionEntries is ACTION category policy but receiving a set of candidate entries
 	ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error)

-	// CreateEntries is CREATE category policy but receving a set of candidate entries
+	// CreateEntries is CREATE category policy but receiving a set of candidate entries
 	CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error)

-	// SearchEntries is SEARCH category policy but receving a set of candidate entries
+	// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 	SearchEntries(entries ...upstream.Entry) (upstream.Entry, error)
 }

@@ -35,7 +35,7 @@ func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string
 	return []*upstream.Fs{p.rand(upstreams)}, nil
 }

-// ActionEntries is ACTION category policy but receving a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.All.ActionEntries(entries...)
 	if err != nil {
@@ -53,7 +53,7 @@ func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string
 	return []*upstream.Fs{p.rand(upstreams)}, nil
 }

-// CreateEntries is CREATE category policy but receving a set of candidate entries
+// CreateEntries is CREATE category policy but receiving a set of candidate entries
 func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	entries, err := p.All.CreateEntries(entries...)
 	if err != nil {
@@ -74,7 +74,7 @@ func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string
 	return p.rand(upstreams), nil
 }

-// SearchEntries is SEARCH category policy but receving a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -100,7 +100,7 @@ func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
 	return f, err
 }

-// WrapDirectory wraps a fs.Directory to include the info
+// WrapDirectory wraps an fs.Directory to include the info
 // of the upstream Fs
 func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
 	if e == nil {
@@ -112,7 +112,7 @@ func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
 	}
 }

-// WrapObject wraps a fs.Object to include the info
+// WrapObject wraps an fs.Object to include the info
 // of the upstream Fs
 func (f *Fs) WrapObject(o fs.Object) *Object {
 	if o == nil {
@@ -124,7 +124,7 @@ func (f *Fs) WrapObject(o fs.Object) *Object {
 	}
 }

-// WrapEntry wraps a fs.DirEntry to include the info
+// WrapEntry wraps an fs.DirEntry to include the info
 // of the upstream Fs
 func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
 	switch e.(type) {
@@ -48,7 +48,7 @@ type SuccessResponseBody struct {
 	Token string `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"`
 }

-// SharepointError holds a error response microsoft login
+// SharepointError holds an error response microsoft login
 type SharepointError struct {
 	XMLName xml.Name `xml:"Envelope"`
 	Body ErrorResponseBody `xml:"Body"`
@@ -58,7 +58,7 @@ func (e *SharepointError) Error() string {
 	return fmt.Sprintf("%s: %s (%s)", e.Body.FaultCode, e.Body.Reason, e.Body.Detail)
 }

-// ErrorResponseBody contains the body of a erroneous repsonse
+// ErrorResponseBody contains the body of an erroneous response
 type ErrorResponseBody struct {
 	XMLName xml.Name
 	FaultCode string `xml:"Fault>Code>Subcode>Value"`
@@ -200,12 +200,12 @@ func (f *Fs) setRoot(root string) {
 	f.diskRoot = diskRoot
 }

-// filePath returns a escaped file path (f.root, file)
+// filePath returns an escaped file path (f.root, file)
 func (f *Fs) filePath(file string) string {
 	return path.Join(f.diskRoot, file)
 }

-// dirPath returns a escaped file path (f.root, file) ending with '/'
+// dirPath returns an escaped file path (f.root, file) ending with '/'
 func (f *Fs) dirPath(file string) string {
 	return path.Join(f.diskRoot, file) + "/"
 }
@@ -502,7 +502,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {

 	if err = f.CreateDir(ctx, dirString); err != nil {
 		if apiErr, ok := err.(*api.ErrorResponse); ok {
-			// allready exists
+			// already exists
 			if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
 				// 2 if it fails then create all directories in the path from root.
 				dirs := strings.Split(dirString, "/") //path separator
@@ -1,4 +1,4 @@
-// Package cmount implents a FUSE mounting system for rclone remotes.
+// Package cmount implements a FUSE mounting system for rclone remotes.
 //
 // This uses the cgo based cgofuse library

@@ -33,7 +33,7 @@ var commandDefinition = &cobra.Command{
 Download a URL's content and copy it to the destination without saving
 it in temporary storage.

-Setting --auto-filename will cause the file name to be retreived from
+Setting --auto-filename will cause the file name to be retrieved from
 the from URL (after any redirections) and used in the destination
 path.

@@ -1,4 +1,4 @@
-// Package mount implents a FUSE mounting system for rclone remotes.
+// Package mount implements a FUSE mounting system for rclone remotes.

 // +build linux,go1.13 darwin,go1.13 freebsd,go1.13

@@ -1,4 +1,4 @@
-// Package mount implents a FUSE mounting system for rclone remotes.
+// Package mount implements a FUSE mounting system for rclone remotes.

 // +build linux darwin,amd64

@@ -85,7 +85,7 @@ func TestRc(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, int64(5), fi.Size())

-	// FIXME the OS somtimes appears to be using the mount
+	// FIXME the OS sometimes appears to be using the mount
 	// immediately after it appears so wait a moment
 	time.Sleep(100 * time.Millisecond)

@@ -181,7 +181,7 @@ func (cds *contentDirectoryService) readContainer(o object, host string) (ret []
 // Given a list of nodes, separate them into potential media items and any associated resources (external subtitles,
 // for example.)
 //
-// The result is a a slice of potential media nodes (in their original order) and a map containing associated
+// The result is a slice of potential media nodes (in their original order) and a map containing associated
 // resources nodes of each media node, if any.
 func mediaWithResources(nodes vfs.Nodes) (vfs.Nodes, map[vfs.Node]vfs.Nodes) {
 	media, mediaResources := vfs.Nodes{}, make(map[vfs.Node]vfs.Nodes)
@@ -34,7 +34,7 @@ func init() {
 var Command = &cobra.Command{
 	Use: "dlna remote:path",
 	Short: `Serve remote:path over DLNA`,
-	Long: `rclone serve dlna is a DLNA media server for media stored in a rclone remote. Many
+	Long: `rclone serve dlna is a DLNA media server for media stored in an rclone remote. Many
 devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN
 and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast
 packets (SSDP) and will thus only work on LANs.
@@ -123,7 +123,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) {
 	})
 }

-// Error logs the error and if a ResponseWriter is given it writes a http.StatusInternalServerError
+// Error logs the error and if a ResponseWriter is given it writes an http.StatusInternalServerError
 func Error(what interface{}, w http.ResponseWriter, text string, err error) {
 	err = fs.CountError(err)
 	fs.Errorf(what, "%s: %v", text, err)
@@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) {
 	}
 }

-// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and defailt is namedirfist/asc
+// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfist/asc
 func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
 	d.Sort = sortParm
 	d.Order = orderParm
@@ -89,7 +89,7 @@ that since |_obscure| is set to |pass|, rclone will obscure the |pass|
 parameter before creating the backend (which is required for sftp
 backends).

-The progam can manipulate the supplied |user| in any way, for example
+The program can manipulate the supplied |user| in any way, for example
 to make proxy to many different sftp backends, you could make the
 |user| be |user@example.com| and then set the |host| to |example.com|
 in the output and the user to |user|. For security you'd probably want
@@ -51,7 +51,7 @@ type conn struct {
 	what string
 }

-// execCommand implements an extrememly limited number of commands to
+// execCommand implements an extremely limited number of commands to
 // interoperate with the rclone sftp backend
 func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) {
 	binary, args := command, ""
@@ -143,7 +143,7 @@ func Tree(fsrc fs.Fs, outFile io.Writer, opts *tree.Options) error {
 	return nil
 }

-// FileInfo maps a fs.DirEntry into an os.FileInfo
+// FileInfo maps an fs.DirEntry into an os.FileInfo
 type FileInfo struct {
 	entry fs.DirEntry
 }
@@ -222,7 +222,7 @@ There are a couple of issues with Windows `mount` functionality that still requi
 It should be considered as experimental thus far as fixes come in for this OS.

 Most of the issues seem to be related to the difference between filesystems
-on Linux flavors and Windows as cache is heavily dependant on them.
+on Linux flavors and Windows as cache is heavily dependent on them.

 Any reports or feedback on how cache behaves on this OS is greatly appreciated.

@@ -376,7 +376,7 @@ date: "2020-02-01"
 * march: Fix checking sub-directories when using `--no-traverse` (buengese)
 * rc
     * Fix unmarshalable http.AuthFn in options and put in test for marshalability (Nick Craig-Wood)
-    * Move job expire flags to rc to fix initalization problem (Nick Craig-Wood)
+    * Move job expire flags to rc to fix initialization problem (Nick Craig-Wood)
     * Fix `--loopback` with rc/list and others (Nick Craig-Wood)
 * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
 * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
@@ -515,7 +515,7 @@ date: "2020-02-01"
 * Onedrive
     * More accurately check if root is found (Cnly)
 * S3
-    * Suppport S3 Accelerated endpoints with `--s3-use-accelerate-endpoint` (Nick Craig-Wood)
+    * Support S3 Accelerated endpoints with `--s3-use-accelerate-endpoint` (Nick Craig-Wood)
     * Add config info for Wasabi's EU Central endpoint (Robert Marko)
     * Make SetModTime work for GLACIER while syncing (Philip Harvey)
 * SFTP
@@ -1295,18 +1295,18 @@ Point release to fix hubic and azureblob backends.
     * Rclone no longer has any working keys - disable integration tests
     * Implement DirChangeNotify to notify cache/vfs/mount of changes
 * Azureblob
-    * Don't check for bucket/container presense if listing was OK
+    * Don't check for bucket/container presence if listing was OK
         * this makes rclone do one less request per invocation
     * Improve accounting for chunked uploads
 * Backblaze B2
-    * Don't check for bucket/container presense if listing was OK
+    * Don't check for bucket/container presence if listing was OK
        * this makes rclone do one less request per invocation
 * Box
     * Improve accounting for chunked uploads
 * Dropbox
     * Fix custom oauth client parameters
 * Google Cloud Storage
-    * Don't check for bucket/container presense if listing was OK
+    * Don't check for bucket/container presence if listing was OK
        * this makes rclone do one less request per invocation
 * Google Drive
     * Migrate to api v3 (Fabian Möller)
@@ -1329,13 +1329,13 @@ Point release to fix hubic and azureblob backends.
 * Pcloud
     * Remove unused chunked upload flag and code
 * Qingstor
-    * Don't check for bucket/container presense if listing was OK
+    * Don't check for bucket/container presence if listing was OK
        * this makes rclone do one less request per invocation
 * S3
     * Support hashes for multipart files (Chris Redekop)
     * Initial support for IBM COS (S3) (Giri Badanahatti)
     * Update docs to discourage use of v2 auth with CEPH and others
-    * Don't check for bucket/container presense if listing was OK
+    * Don't check for bucket/container presence if listing was OK
        * this makes rclone do one less request per invocation
     * Fix server side copy and set modtime on files with + in
 * SFTP
@@ -1350,7 +1350,7 @@ Point release to fix hubic and azureblob backends.
     * Fix refresh of authentication token
        * in v1.39 a bug was introduced which ignored new tokens - this fixes it
     * Fix extra HEAD transaction when uploading a new file
-    * Don't check for bucket/container presense if listing was OK
+    * Don't check for bucket/container presence if listing was OK
        * this makes rclone do one less request per invocation
 * Webdav
     * Add new time formats to support mydrive.ch and others
@@ -1375,7 +1375,7 @@ Point release to fix hubic and azureblob backends.
 * curl install for rclone (Filip Bartodziej)
 * --stats now shows percentage, size, rate and ETA in condensed form (Ishuah Kariuki)
 * --exclude-if-present to exclude a directory if a file is present (Iakov Davydov)
-* rmdirs: add --leave-root flag (lewpam)
+* rmdirs: add --leave-root flag (lewapm)
 * move: add --delete-empty-src-dirs flag to remove dirs after move (Ishuah Kariuki)
 * Add --dump flag, introduce --dump requests, responses and remove --dump-auth, --dump-filters
 * Obscure X-Auth-Token: from headers when dumping too
@@ -2086,7 +2086,7 @@ Point release to fix hubic and azureblob backends.
 * New features
     * Amazon Drive support
     * Oauth support redone - fix many bugs and improve usability
-        * Use "golang.org/x/oauth2" as oauth libary of choice
+        * Use "golang.org/x/oauth2" as oauth library of choice
        * Improve oauth usability for smoother initial signup
        * drive, googlecloudstorage: optionally use auto config for the oauth token
     * Implement --dump-headers and --dump-bodies debug flags
@@ -7,7 +7,7 @@ date: "2018-08-07"
 <i class="fa fa-cloud"></i> Jottacloud
 -----------------------------------------

-Jottacoud is a cloud storage service provider from a Norwegian company, using its own datacenters in Norway.
+Jottacloud is a cloud storage service provider from a Norwegian company, using its own datacenters in Norway.

 In addition to the official service at [jottacloud.com](https://www.jottacloud.com/), there are
 also several whitelabel versions which should work with this backend.
@@ -359,7 +359,7 @@ func (acc *Account) progress() (bytes, size int64) {
 }
 
 // speed returns the speed of the current file transfer
-// in bytes per second, as well a an exponentially weighted moving average
+// in bytes per second, as well an exponentially weighted moving average
 // If no read has completed yet, 0 is returned for both values.
 func (acc *Account) speed() (bps, current float64) {
 	if acc == nil {
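
For illustration, the two values that comment describes can be sketched as a plain average plus an exponentially weighted moving average. This is a minimal sketch, assuming an illustrative 0.9/0.1 weighting rather than rclone's actual smoothing constant:

    package main

    import (
        "fmt"
        "time"
    )

    // ewmaSpeed sketches the idea behind Account.speed(): the plain
    // average since the transfer started, plus an exponentially
    // weighted moving average that favours recent throughput. The
    // 0.9/0.1 weighting is an assumption for illustration only.
    func ewmaSpeed(bytes int64, elapsed time.Duration, prevEWMA float64) (avg, ewma float64) {
        if elapsed <= 0 {
            return 0, prevEWMA
        }
        avg = float64(bytes) / elapsed.Seconds()
        ewma = 0.9*prevEWMA + 0.1*avg // recent samples dominate over time
        return avg, ewma
    }

    func main() {
        avg, ewma := ewmaSpeed(10<<20, 2*time.Second, 4<<20)
        fmt.Printf("avg=%.0f B/s ewma=%.0f B/s\n", avg, ewma)
    }
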
@@ -166,7 +166,7 @@ Returns the following values:
 				"bytes": total transferred bytes for this file,
 				"checked": if the transfer is only checked (skipped, deleted),
 				"timestamp": integer representing millisecond unix epoch,
-				"error": string description of the error (empty if successfull),
+				"error": string description of the error (empty if successful),
 				"jobid": id of the job that this transfer belongs to
 			}
 		]
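
For context, these fields come back from the remote control API for completed transfers. Assuming this help text belongs to the core/transferred rc call, and assuming a server started with `rclone rcd`, the live output can be inspected with:

    rclone rc core/transferred
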
fs/cache/cache.go
@@ -37,7 +37,7 @@ func addMapping(fsString, canonicalName string) {
 	mu.Unlock()
 }
 
-// GetFn gets a fs.Fs named fsString either from the cache or creates
+// GetFn gets an fs.Fs named fsString either from the cache or creates
 // it afresh with the create function
 func GetFn(fsString string, create func(fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
 	fsString = canonicalize(fsString)
@@ -77,7 +77,7 @@ func Unpin(f fs.Fs) {
 	c.Pin(fs.ConfigString(f))
 }
 
-// Get gets a fs.Fs named fsString either from the cache or creates it afresh
+// Get gets an fs.Fs named fsString either from the cache or creates it afresh
 func Get(fsString string) (f fs.Fs, err error) {
 	return GetFn(fsString, fs.NewFs)
 }
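
The pattern these comments document is a classic cache-or-create lookup. A minimal self-contained sketch of the idea, using a plain string where the real code returns an fs.Fs:

    package main

    import (
        "fmt"
        "sync"
    )

    // fsCache is a toy version of the cache-or-create pattern GetFn
    // describes: look the name up first, and only run the expensive
    // create function on a miss.
    type fsCache struct {
        mu    sync.Mutex
        items map[string]string
    }

    func (c *fsCache) getFn(name string, create func(string) (string, error)) (string, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if f, ok := c.items[name]; ok {
            return f, nil // cache hit - skip create entirely
        }
        f, err := create(name)
        if err != nil {
            return "", err
        }
        c.items[name] = f // remember it for the next caller
        return f, nil
    }

    func main() {
        c := &fsCache{items: map[string]string{}}
        f, _ := c.getFn("remote:path", func(s string) (string, error) {
            return "Fs(" + s + ")", nil
        })
        fmt.Println(f)
    }
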
@@ -89,7 +89,7 @@ func Put(fsString string, f fs.Fs) {
 	addMapping(fsString, canonicalName)
 }
 
-// Clear removes everything from the cahce
+// Clear removes everything from the cache
 func Clear() {
 	c.Clear()
 }
@@ -19,7 +19,7 @@ var (
 // ChunkedReader is a reader for a Object with the possibility
 // of reading the source in chunks of given size
 //
-// A initialChunkSize of <= 0 will disable chunked reading.
+// An initialChunkSize of <= 0 will disable chunked reading.
 type ChunkedReader struct {
 	ctx context.Context
 	mu sync.Mutex // protects following fields
@@ -36,7 +36,7 @@ type ChunkedReader struct {
 
 // New returns a ChunkedReader for the Object.
 //
-// A initialChunkSize of <= 0 will disable chunked reading.
+// An initialChunkSize of <= 0 will disable chunked reading.
 // If maxChunkSize is greater than initialChunkSize, the chunk size will be
 // doubled after each chunk read with a maximun of maxChunkSize.
 // A Seek or RangeSeek will reset the chunk size to it's initial value
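
The doubling rule in that comment is easy to misread, so here is a minimal sketch of it; the function name and the 128 KiB / 1 MiB figures are illustrative assumptions, not rclone's API:

    package main

    import "fmt"

    // nextChunkSize mirrors the documented behaviour: double after
    // every chunk read, capped at maxChunkSize.
    func nextChunkSize(current, maxChunkSize int64) int64 {
        next := current * 2
        if maxChunkSize > 0 && next > maxChunkSize {
            next = maxChunkSize
        }
        return next
    }

    func main() {
        size := int64(128 << 10) // an assumed initialChunkSize of 128 KiB
        for i := 0; i < 5; i++ {
            fmt.Println(size)
            size = nextChunkSize(size, 1<<20) // cap at 1 MiB
        }
        // prints 131072, 262144, 524288, 1048576, 1048576
    }
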
@@ -156,7 +156,7 @@ func NewConfig() *ConfigInfo {
 	return c
 }
 
-// ConfigToEnv converts an config section and name, eg ("myremote",
+// ConfigToEnv converts a config section and name, eg ("myremote",
 // "ignore-size") into an environment name
 // "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
 func ConfigToEnv(section, name string) string {
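
The documented mapping is mechanical enough to sketch directly; this is a re-implementation for illustration, not the rclone function itself:

    package main

    import (
        "fmt"
        "strings"
    )

    // toEnv sketches the documented conversion:
    // ("myremote", "ignore-size") -> "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
    func toEnv(section, name string) string {
        return "RCLONE_CONFIG_" +
            strings.ToUpper(strings.ReplaceAll(section+"_"+name, "-", "_"))
    }

    func main() {
        fmt.Println(toEnv("myremote", "ignore-size"))
        // RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE
    }
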
@@ -426,7 +426,7 @@ func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (b
 }
 
 // DirContainsExcludeFile checks if exclude file is present in a
-// directroy. If fs is nil, it works properly if ExcludeFile is an
+// directory. If fs is nil, it works properly if ExcludeFile is an
 // empty string (for testing).
 func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) {
 	if len(f.Opt.ExcludeFile) > 0 {
fs/fs.go
@@ -1079,7 +1079,7 @@ type CommandHelp struct {
 	Opts map[string]string // maps option name to a single line help
 }
 
-// Commander is an iterface to wrap the Command function
+// Commander is an interface to wrap the Command function
 type Commander interface {
 	// Command the backend to run a named command
 	//
@@ -1137,7 +1137,7 @@ func UnWrapObject(o Object) Object {
 	return o
 }
 
-// Find looks for an RegInfo object for the name passed in. The name
+// Find looks for a RegInfo object for the name passed in. The name
 // can be either the Name or the Prefix.
 //
 // Services are looked up in the config file
@@ -360,7 +360,7 @@ func Cause(cause error) (retriable bool, err error) {
 }
 
 // retriableErrorStrings is a list of phrases which when we find it
-// in an an error, we know it is a networking error which should be
+// in an error, we know it is a networking error which should be
 // retried.
 //
 // This is incredibly ugly - if only errors.Cause worked for all
@@ -215,7 +215,7 @@ func NewClient(ci *fs.ConfigInfo) *http.Client {
 	return client
 }
 
-// Transport is a our http Transport which wraps an http.Transport
+// Transport is our http Transport which wraps an http.Transport
 // * Sets the User Agent
 // * Does logging
 type Transport struct {
@@ -15,7 +15,7 @@ type CallerHook struct {
 	levels []logrus.Level
 }
 
-// NewCallerHook use to make an hook
+// NewCallerHook use to make a hook
 func NewCallerHook(levels ...logrus.Level) logrus.Hook {
 	hook := CallerHook{
 		Field: "source",
@@ -39,7 +39,7 @@ func (h *CallerHook) Fire(entry *logrus.Entry) error {
 	return nil
 }
 
-// findCaller ignores the caller relevent to logrus or fslog then find out the exact caller
+// findCaller ignores the caller relevant to logrus or fslog then find out the exact caller
 func findCaller(skip int) string {
 	file := ""
 	line := 0
@@ -418,7 +418,7 @@ command:
 
     rclone backend noop . -o echo=yes -o blue path1 path2
 
-Note that arguments must be preceeded by the "-a" flag
+Note that arguments must be preceded by the "-a" flag
 
 See the [backend](/commands/rclone_backend/) command for more information.
 `,
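
The "-a" flag belongs to the remote control client. Assuming a server started with `rclone rcd`, an invocation equivalent to the example above would look something like:

    rclone rc backend/command command=noop fs=. -o echo=yes -a path1 -a path2
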
@@ -19,7 +19,7 @@ var _ io.ReadCloser = (*ReOpen)(nil)
 
 var errorTestError = errors.New("test error")
 
-// this is a wrapper for an mockobject with a custom Open function
+// this is a wrapper for a mockobject with a custom Open function
 //
 // breaks indicate the number of bytes to read before returning an
 // error
@@ -141,7 +141,7 @@ func (o *RangeOption) Decode(size int64) (offset, limit int64) {
 func FixRangeOption(options []OpenOption, size int64) {
 	if size == 0 {
 		// if size 0 then remove RangeOption~s
-		// replacing with an NullOptions~s which won't be rendered
+		// replacing with a NullOptions~s which won't be rendered
 		for i := range options {
 			if _, ok := options[i].(*RangeOption); ok {
 				options[i] = NullOption{}
@@ -7,7 +7,7 @@ import (
 	"github.com/rclone/rclone/fs/cache"
 )
 
-// GetFsNamed gets a fs.Fs named fsName either from the cache or creates it afresh
+// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh
 func GetFsNamed(in Params, fsName string) (f fs.Fs, err error) {
 	fsString, err := in.GetString(fsName)
 	if err != nil {
@@ -17,7 +17,7 @@ func GetFsNamed(in Params, fsName string) (f fs.Fs, err error) {
 	return cache.Get(fsString)
 }
 
-// GetFs gets a fs.Fs named "fs" either from the cache or creates it afresh
+// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh
 func GetFs(in Params) (f fs.Fs, err error) {
 	return GetFsNamed(in, "fs")
 }
@@ -16,7 +16,7 @@ import (
 	"github.com/rclone/rclone/fs/rc"
 )
 
-// Job describes a asynchronous task started via the rc package
+// Job describes an asynchronous task started via the rc package
 type Job struct {
 	mu sync.Mutex
 	ID int64 `json:"id"`
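
A Job like this is created whenever an rc call is made with the `_async` parameter. As a sketch of the round trip, assuming a server started with `rclone rcd` and that this is the first job (so it gets id 1):

    rclone rc sync/copy srcFs=src: dstFs=dst: _async=true
    rclone rc job/status jobid=1
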
@@ -202,7 +202,7 @@ func unzip(src, dest string) (err error) {
 	return nil
 }
 
-func exists(path string) (existance bool, stat os.FileInfo, err error) {
+func exists(path string) (existence bool, stat os.FileInfo, err error) {
 	stat, err = os.Stat(path)
 	if err == nil {
 		return true, stat, nil
@@ -76,7 +76,7 @@ func (p *pipe) Pop() interface{} {
 	return item
 }
 
-// Put an pair into the pipe
+// Put a pair into the pipe
 //
 // It returns ok = false if the context was cancelled
 //
@@ -616,7 +616,7 @@ func Run(t *testing.T, opt *Opt) {
 		}
 	})
 
-	// TestFsNewObjectNotFound tests not finding a object
+	// TestFsNewObjectNotFound tests not finding an object
 	t.Run("FsNewObjectNotFound", func(t *testing.T) {
 		skipIfNotOk(t)
 		// Object in an existing directory
@@ -102,7 +102,7 @@ type ContentMockObject struct {
 	unknownSize bool
 }
 
-// WithContent returns a fs.Object with the given content.
+// WithContent returns an fs.Object with the given content.
 func (o Object) WithContent(content []byte, mode SeekMode) *ContentMockObject {
 	return &ContentMockObject{
 		Object: o,
@@ -48,7 +48,7 @@ var (
 // if matches then is definitely OK in the shell
 var shellOK = regexp.MustCompile("^[A-Za-z0-9./_:-]+$")
 
-// converts a argv style input into a shell command
+// converts an argv style input into a shell command
 func toShell(args []string) (result string) {
 	for _, arg := range args {
 		if result != "" {
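
The quoting rule the comment and regexp describe can be sketched as follows; this is an illustration of the documented behaviour, not rclone's exact implementation:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // needsNoQuoting matches arguments that are safe to paste into a
    // shell unquoted - the same idea as the shellOK regexp above.
    var needsNoQuoting = regexp.MustCompile("^[A-Za-z0-9./_:-]+$")

    // toShell joins argv-style arguments into one shell command line,
    // single-quoting anything outside the safe set.
    func toShell(args []string) string {
        out := make([]string, 0, len(args))
        for _, arg := range args {
            if needsNoQuoting.MatchString(arg) {
                out = append(out, arg)
            } else {
                // close the quote, emit an escaped quote, reopen
                out = append(out, "'"+strings.ReplaceAll(arg, "'", `'\''`)+"'")
            }
        }
        return strings.Join(out, " ")
    }

    func main() {
        fmt.Println(toShell([]string{"ls", "-l", "my file.txt"}))
        // ls -l 'my file.txt'
    }
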
@@ -57,7 +57,7 @@ func Unregister(handle FnHandle) {
 	delete(fns, handle)
 }
 
-// IgnoreSignals disables the signal handler and prevents Run from beeing executed automatically
+// IgnoreSignals disables the signal handler and prevents Run from being executed automatically
 func IgnoreSignals() {
 	registerOnce.Do(func() {})
 	if exitChan != nil {
@@ -88,7 +88,7 @@ func (c *Cache) Create(bucket string, create CreateFn, exists ExistsFn) (err err
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
-	// if have exists fuction and bucket has been deleted, check
+	// if have exists function and bucket has been deleted, check
 	// it still exists
 	if created, ok := c.status[bucket]; ok && !created && exists != nil {
 		found, err := exists()
lib/cache/cache.go
@@ -95,7 +95,7 @@ func (c *Cache) Unpin(key string) {
 	c.addPin(key, -1)
 }
 
-// Put puts an value named key into the cache
+// Put puts a value named key into the cache
 func (c *Cache) Put(key string, value interface{}) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
@@ -159,7 +159,7 @@ func (c *Cache) cacheExpire() {
 	}
 }
 
-// Clear removes everything from the cahce
+// Clear removes everything from the cache
 func (c *Cache) Clear() {
 	c.mu.Lock()
 	for k := range c.cache {
@@ -2,7 +2,7 @@
 Translate file names for usage on restrictive storage systems
 
 The restricted set of characters are mapped to a unicode equivalent version
-(most to their FULLWIDTH variant) to increase compatability with other
+(most to their FULLWIDTH variant) to increase compatibility with other
 storage systems.
 See: http://unicode-search.net/unicode-namesearch.pl?term=FULLWIDTH
 
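
As a worked illustration of that mapping, here is a tiny hand-picked subset of the substitution, assumed for the example rather than taken from the encoder's real table:

    package main

    import "fmt"

    // fullwidth maps a few restricted ASCII characters to their
    // FULLWIDTH equivalents, as the package comment describes.
    var fullwidth = map[rune]rune{
        ':': '：', // U+FF1A FULLWIDTH COLON
        '?': '？', // U+FF1F FULLWIDTH QUESTION MARK
        '*': '＊', // U+FF0A FULLWIDTH ASTERISK
    }

    func encodeName(s string) string {
        out := []rune(s)
        for i, r := range out {
            if repl, ok := fullwidth[r]; ok {
                out[i] = repl
            }
        }
        return string(out)
    }

    func main() {
        fmt.Println(encodeName("backup: 12:30?*")) // backup： 12：30？＊
    }
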
@@ -600,7 +600,7 @@ func runePos(r rune, s []rune) int {
 	return -1
 }
 
-// quotedToString returns a string for the chars slice where a encoder.QuoteRune is
+// quotedToString returns a string for the chars slice where an encoder.QuoteRune is
 // inserted before a char[i] when quoted[i] is true.
 func quotedToString(chars []rune, quoted []bool) string {
 	var out strings.Builder
@@ -82,7 +82,7 @@ func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
 }
 
 // NewRepeatableLimitReader create new repeatable reader from Reader r
-// with an initial buffer of size wrapped in a io.LimitReader to read
+// with an initial buffer of size wrapped in an io.LimitReader to read
 // only size.
 func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
 	return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
@@ -98,7 +98,7 @@ func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
 }
 
 // NewRepeatableLimitReaderBuffer create new repeatable reader from
-// Reader r and buf wrapped in a io.LimitReader to read only size.
+// Reader r and buf wrapped in an io.LimitReader to read only size.
 func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
 	return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
 }
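
A RepeatableReader buffers what it reads so the stream can be rewound and replayed, which is what makes retrying uploads of unseekable sources possible. A usage sketch, assuming the plain NewRepeatableReader constructor from the same package:

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/rclone/rclone/lib/readers"
    )

    func main() {
        r := readers.NewRepeatableReader(strings.NewReader("hello world"))

        first := make([]byte, 5)
        if _, err := io.ReadFull(r, first); err != nil {
            panic(err)
        }

        // Rewind and read the same bytes a second time.
        if _, err := r.Seek(0, io.SeekStart); err != nil {
            panic(err)
        }
        second := make([]byte, 5)
        if _, err := io.ReadFull(r, second); err != nil {
            panic(err)
        }
        fmt.Printf("%s == %s\n", first, second)
    }
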
@@ -90,7 +90,7 @@ func TestRepeatableReader(t *testing.T) {
 	assert.Nil(t, err)
 	require.Equal(t, 2, int(pos))
 
-	// Should read from seek postion and past it
+	// Should read from seek position and past it
 	dst = make([]byte, 5)
 	n, err = io.ReadFull(r, dst)
 	assert.Nil(t, err)
@@ -111,7 +111,7 @@ func (api *Client) SetUserPass(UserName, Password string) *Client {
 	return api
 }
 
-// SetCookie creates an Cookies Header for all requests with the supplied
+// SetCookie creates a Cookies Header for all requests with the supplied
 // cookies passed in.
 // All cookies have to be supplied at once, all cookies will be overwritten
 // on a new call to the method
@@ -407,7 +407,7 @@ func (api *Client) CallJSON(ctx context.Context, opts *Opts, request interface{}
 	return api.callCodec(ctx, opts, request, response, json.Marshal, DecodeJSON, "application/json")
 }
 
-// CallXML runs Call and decodes the body as a XML object into response (if not nil)
+// CallXML runs Call and decodes the body as an XML object into response (if not nil)
 //
 // If request is not nil then it will be XML encoded as the body of the request
 //
@@ -54,7 +54,7 @@ type File struct {
 	appendMode bool // file was opened with O_APPEND
 	sys interface{} // user defined info to be attached here
 
-	muRW sync.Mutex // synchonize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
+	muRW sync.Mutex // synchronize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
 }
 
 // newFile creates a new File
@@ -300,7 +300,7 @@ func testFileRename(t *testing.T, mode vfscommon.CacheMode) {
 	}
 
 	// now try renaming it with the file open
-	// first open it and write to it but dont close it
+	// first open it and write to it but don't close it
 	fd, err := file.Open(os.O_WRONLY | os.O_TRUNC)
 	require.NoError(t, err)
 	newContents := []byte("this is some new contents")
@@ -117,7 +117,7 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
 	fh.hash = nil
 	if !reopen {
 		ar := fh.r.GetAsyncReader()
-		// try to fullfill the seek with buffer discard
+		// try to fulfill the seek with buffer discard
 		if ar != nil && ar.SkipBytes(int(offset-fh.offset)) {
 			fh.offset = offset
 			return nil
@@ -252,7 +252,7 @@ func waitSequential(what string, remote string, cond *sync.Cond, maxWait time.Du
 // Implementation of ReadAt - call with lock held
 func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
 	// defer log.Trace(fh.remote, "p[%d], off=%d", len(p), off)("n=%d, err=%v", &n, &err)
-	err = fh.openPending() // FIXME pending open could be more efficient in the presense of seek (and retries)
+	err = fh.openPending() // FIXME pending open could be more efficient in the presence of seek (and retries)
 	if err != nil {
 		return 0, err
 	}
@@ -105,7 +105,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 		}
 	}
 
-	// try to open a exising cache file
+	// try to open an existing cache file
 	fd, err = file.OpenFile(fh.file.osPath(), cacheFileOpenFlags&^os.O_CREATE, 0600)
 	if os.IsNotExist(err) {
 		// cache file does not exist, so need to fetch it if we have an object to fetch
@@ -151,7 +151,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 		}
 	}
 	// Windows doesn't seem to deal well with O_TRUNC and
-	// certain access modes so so truncate the file if it
+	// certain access modes so truncate the file if it
 	// exists in these cases.
 	if runtime.GOOS == "windows" && fh.flags&os.O_APPEND != 0 {
 		cacheFileOpenFlags &^= os.O_TRUNC
@@ -162,7 +162,7 @@ func TestCacheNew(t *testing.T) {
 
 	// try purging with file closed
 	c.purgeOld(10 * time.Second)
-	// ...nothing should happend
+	// ...nothing should happen
 	_, err = os.Stat(p)
 	assert.NoError(t, err)
 
@@ -42,7 +42,7 @@ var DefaultOpt = Options{
 	ReadOnly: false,
 	Umask: 0,
 	UID: ^uint32(0), // these values instruct WinFSP-FUSE to use the current user
-	GID: ^uint32(0), // overriden for non windows in mount_unix.go
+	GID: ^uint32(0), // overridden for non windows in mount_unix.go
 	DirPerms: os.FileMode(0777),
 	FilePerms: os.FileMode(0666),
 	CacheMode: CacheModeOff,
@@ -192,7 +192,7 @@ func (fh *WriteFileHandle) close() (err error) {
 		fh.file.delWriter(fh, false)
 		fh.file.finishWriterClose()
 	}()
-	// If file not opened and not safe to truncate then then leave file intact
+	// If file not opened and not safe to truncate then leave file intact
 	if !fh.opened && !fh.safeToTruncate() {
 		return nil
 	}