Mirror of https://github.com/rclone/rclone.git (synced 2024-11-24 14:12:00 +08:00)
build: fix lint errors when re-enabling revive exported & package-comments
Commit: e953598987
Parent: feaa20d885
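The changes below satisfy two revive checks: package-comments, which wants a "// Package name ..." comment directly above the package clause, and exported, which wants every exported identifier either documented by a comment starting with its own name or made unexported. As a minimal sketch of the comment shapes these rules accept, using a hypothetical package and names rather than rclone's:

// Package widget is a hypothetical example of the comment shapes that the
// revive package-comments and exported rules accept.
package widget

import "errors"

// ErrNotFound is returned when a widget cannot be located; the doc comment
// starts with the exported name, as the exported rule requires.
var ErrNotFound = errors.New("widget not found")

// errInternal is unexported, so revive does not require a doc comment here.
var errInternal = errors.New("internal widget error")

// Count reports how many widgets exist. Exported functions likewise need
// comments that begin with the function name.
func Count() int {
	return 0
}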
@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	return do(ctx, uRemote, expire, unlink)
 }
 
-// Put in to the remote path with the modTime given of the given size
+// PutUnchecked in to the remote path with the modTime given of the given size
 //
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
@@ -1,3 +1,4 @@
+// Package protondrive implements the Proton Drive backend
 package protondrive
 
 import (
@@ -45,8 +46,8 @@ const (
 )
 
 var (
-	ErrCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size")
-	ErrCanNotPurgeRootDirectory = errors.New("can't purge root directory")
+	errCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size")
+	errCanNotPurgeRootDirectory = errors.New("can't purge root directory")
 
 	// for the auth/deauth handler
 	_mapper configmap.Mapper
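Renaming ErrCanNotUploadFileWithUnknownSize and ErrCanNotPurgeRootDirectory to unexported names is one of the two ways to clear a revive exported warning: either document the exported name or unexport it when nothing outside the package needs it, which the rename here suggests is the case. A hedged sketch of both options, with hypothetical names rather than rclone's:

// Package sketch contrasts the two ways to satisfy revive's exported rule
// for package-level error values. All names here are hypothetical.
package sketch

import "errors"

// ErrTooBig stays exported, so it needs a doc comment starting with its name.
var ErrTooBig = errors.New("input too big")

// errTooSmall is unexported (the approach this commit takes for the Proton
// Drive errors), so the linter does not demand a doc comment.
var errTooSmall = errors.New("input too small")

// Check returns one of the errors above depending on n.
func Check(n int) error {
	switch {
	case n > 100:
		return ErrTooBig
	case n < 0:
		return errTooSmall
	}
	return nil
}

Unexporting does narrow the package's public surface, so it only fits when no external caller matches the sentinel by name.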
@@ -569,12 +570,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 	return entries, nil
 }
 
-// DirCacher describes an interface for doing the low level directory work
+// FindLeaf finds a directory of name leaf in the folder with ID pathID
 //
 // This should be implemented by the backend and will be called by the
 // dircache package when appropriate.
-//
-// FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) {
 	/* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */
 
@@ -593,12 +592,10 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, e
 	return link.LinkID, true, nil
 }
 
-// DirCacher describes an interface for doing the low level directory work
+// CreateDir makes a directory with pathID as parent and name leaf
 //
 // This should be implemented by the backend and will be called by the
 // dircache package when appropriate.
-//
-// CreateDir makes a directory with pathID as parent and name leaf
 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) {
 	/* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */
 
@@ -626,7 +623,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error)
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	size := src.Size()
 	if size < 0 {
-		return nil, ErrCanNotUploadFileWithUnknownSize
+		return nil, errCanNotUploadFileWithUnknownSize
 	}
 
 	existingObj, err := f.NewObject(ctx, src.Remote())
@@ -725,7 +722,7 @@ func (f *Fs) DirCacheFlush() {
 	f.protonDrive.ClearCache()
 }
 
-// Returns the supported hash types of the filesystem
+// Hashes returns the supported hash types of the filesystem
 func (f *Fs) Hashes() hash.Set {
 	return hash.Set(hash.SHA1)
 }
@@ -887,7 +884,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	size := src.Size()
 	if size < 0 {
-		return ErrCanNotUploadFileWithUnknownSize
+		return errCanNotUploadFileWithUnknownSize
 	}
 
 	remote := o.Remote()
@@ -945,7 +942,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		// we can't remove the root directory, but we can list the directory and delete every folder and file in here
-		return ErrCanNotPurgeRootDirectory
+		return errCanNotPurgeRootDirectory
 	}
 
 	folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false)
@@ -104,6 +104,7 @@ func (gs *Groups) AllRegistered() map[*pflag.Flag]struct{} {
 	return out
 }
 
+// All is the global stats Groups
 var All *Groups
 
 // Groups of flags for documentation purposes
@@ -639,6 +639,7 @@ type OpenWriterAter interface {
 // OpenWriterAtFn describes the OpenWriterAt function pointer
 type OpenWriterAtFn func(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
 
+// OpenChunkWriter is an option interface for Fs to implement chunked writing
 type OpenChunkWriter interface {
 	// OpenChunkWriter returns the chunk size and a ChunkWriter
 	//
@@ -650,6 +651,7 @@ type OpenChunkWriter interface {
 // OpenChunkWriterFn describes the OpenChunkWriter function pointer
 type OpenChunkWriterFn func(ctx context.Context, remote string, src ObjectInfo, options ...OpenOption) (chunkSize int64, writer ChunkWriter, err error)
 
+// ChunkWriter is returned by OpenChunkWriter to implement chunked writing
 type ChunkWriter interface {
 	// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
 	WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error)
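The new doc comments above describe the chunked-writing hooks in rclone's fs package. As a rough illustration only, the sketch below defines a local stand-in interface mirroring the WriteChunk signature shown in this hunk, plus a toy in-memory implementation; it is not the real fs.ChunkWriter, which carries further methods not shown here.

// Package chunksketch is a toy model of the chunked-writing hook shown in
// the diff; the interface below is a local stand-in, not rclone's fs package.
package chunksketch

import (
	"context"
	"io"
)

// chunkWriter mirrors only the WriteChunk method visible in the diff.
type chunkWriter interface {
	WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error)
}

// memChunkWriter stores chunks in memory, keyed by chunk number.
type memChunkWriter struct {
	chunks map[int][]byte
}

func newMemChunkWriter() *memChunkWriter {
	return &memChunkWriter{chunks: make(map[int][]byte)}
}

// WriteChunk reads the whole chunk and remembers it under its chunk number.
func (w *memChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
	if err := ctx.Err(); err != nil {
		return 0, err
	}
	data, err := io.ReadAll(reader)
	if err != nil {
		return 0, err
	}
	w.chunks[chunkNumber] = data
	return int64(len(data)), nil
}

// Compile-time check that the toy type satisfies the stand-in interface.
var _ chunkWriter = (*memChunkWriter)(nil)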
@@ -276,18 +276,22 @@ func (o MetadataOption) Mandatory() bool {
 	return false
 }
 
+// ChunkOption defines an Option which returns a preferred chunk size
 type ChunkOption struct {
 	ChunkSize int64
 }
 
+// Header formats the option as an http header
 func (o *ChunkOption) Header() (key string, value string) {
 	return "chunkSize", fmt.Sprintf("%v", o.ChunkSize)
 }
 
+// Mandatory returns whether the option must be parsed or can be ignored
 func (o *ChunkOption) Mandatory() bool {
 	return false
 }
 
+// String formats the option into human-readable form
 func (o *ChunkOption) String() string {
 	return fmt.Sprintf("ChunkOption(%v)", o.ChunkSize)
 }
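ChunkOption, now documented above, is an open option that carries a preferred chunk size and surfaces it through the usual Header/Mandatory/String plumbing. A small sketch, using local stand-in types rather than the real fs.OpenOption, of how such a hint might be pulled back out of an options slice:

// Package optionsketch models how a chunk-size hint could travel through an
// options slice. These types are stand-ins, not rclone's fs package.
package optionsketch

import "fmt"

// openOption mirrors the subset of option methods shown in the diff.
type openOption interface {
	Header() (key string, value string)
	Mandatory() bool
	String() string
}

// chunkOption carries a preferred chunk size, like the ChunkOption above.
type chunkOption struct {
	ChunkSize int64
}

func (o *chunkOption) Header() (string, string) { return "chunkSize", fmt.Sprintf("%v", o.ChunkSize) }
func (o *chunkOption) Mandatory() bool          { return false }
func (o *chunkOption) String() string           { return fmt.Sprintf("chunkOption(%v)", o.ChunkSize) }

// preferredChunkSize scans options for a chunk-size hint, falling back to def.
func preferredChunkSize(options []openOption, def int64) int64 {
	for _, opt := range options {
		if co, ok := opt.(*chunkOption); ok {
			return co.ChunkSize
		}
	}
	return def
}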
@@ -1,3 +1,4 @@
+// Package multipart implements generic multipart uploading.
 package multipart
 
 import (
@@ -37,7 +38,7 @@ func getPool() *pool.Pool {
 	return bufferPool
 }
 
-// Get a pool.RW using the multipart pool
+// NewRW gets a pool.RW using the multipart pool
 func NewRW() *pool.RW {
 	return pool.NewRW(getPool())
 }
@@ -50,7 +51,7 @@ type UploadMultipartOptions struct {
 	LeavePartsOnError bool // if set don't delete parts uploaded so far on error
 }
 
-// Do a generic multipart upload from src using f as OpenChunkWriter.
+// UploadMultipart does a generic multipart upload from src using f as OpenChunkWriter.
 //
 // in is read seqentially and chunks from it are uploaded in parallel.
 //
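The renamed UploadMultipart comment describes the shape of the upload: the input is read sequentially while chunks are uploaded in parallel. The sketch below is a simplified illustration of that pattern under stated assumptions (a writeChunkFunc stand-in instead of rclone's ChunkWriter, no retries, no part cleanup); it is not the library's implementation.

// Package uploadsketch illustrates "read sequentially, upload in parallel"
// with a stand-in write function; it is not rclone's multipart code.
package uploadsketch

import (
	"bytes"
	"context"
	"io"
	"sync"
)

// writeChunkFunc stands in for a ChunkWriter.WriteChunk method.
type writeChunkFunc func(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error)

// uploadChunked reads in sequentially in chunkSize pieces and uploads up to
// concurrency chunks at a time via write.
func uploadChunked(ctx context.Context, in io.Reader, chunkSize int64, concurrency int, write writeChunkFunc) error {
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	setErr := func(err error) {
		mu.Lock()
		defer mu.Unlock()
		if firstErr == nil {
			firstErr = err
		}
	}
	sem := make(chan struct{}, concurrency) // bounds how many chunks upload at once

	for chunkNumber := 0; ; chunkNumber++ {
		// Sequential read of the next chunk from in.
		buf := make([]byte, chunkSize)
		n, err := io.ReadFull(in, buf)
		if n > 0 {
			sem <- struct{}{}
			wg.Add(1)
			go func(num int, data []byte) {
				defer wg.Done()
				defer func() { <-sem }()
				// Parallel upload of the chunk that was just read.
				if _, werr := write(ctx, num, bytes.NewReader(data)); werr != nil {
					setErr(werr)
				}
			}(chunkNumber, buf[:n])
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break // input exhausted
		}
		if err != nil {
			setErr(err)
			break
		}
	}

	wg.Wait()
	return firstErr
}

In this sketch a caller would pass the chunk size negotiated by an OpenChunkWriter-style hook and the backend's WriteChunk method as write.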
@@ -1,3 +1,4 @@
+// Package proxy enables SOCKS5 proxy dialling
 package proxy
 
 import (