2015-09-23 01:47:16 +08:00
// Package amazonclouddrive provides an interface to the Amazon Cloud
// Drive object storage system.
2015-09-03 06:37:42 +08:00
package amazonclouddrive
/*
FIXME make searching for directory in id and file in id more efficient
- use the name:"..." search parameter - remember the escaping rules
- use Folder GetNode and GetFile
FIXME make the default for no files and no dirs be (FILE AND FOLDER) so
we ignore assets completely!
*/
import (
2016-11-08 04:38:57 +08:00
"encoding/json"
2015-09-03 06:37:42 +08:00
"fmt"
"io"
"log"
2015-09-10 06:23:37 +08:00
"net/http"
2016-11-26 05:52:43 +08:00
"path"
2015-09-03 06:37:42 +08:00
"regexp"
"strings"
"time"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
2018-01-13 00:30:54 +08:00
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
2018-01-12 00:29:20 +08:00
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
2016-06-12 22:06:02 +08:00
"github.com/pkg/errors"
2015-09-03 06:37:42 +08:00
"golang.org/x/oauth2"
)
const (
	folderKind      = "FOLDER"                // node kind for directories
	fileKind        = "FILE"                  // node kind for regular files
	assetKind       = "ASSET"                 // node kind for assets - ignored in listings
	statusAvailable = "AVAILABLE"             // only nodes in this status are usable
	timeFormat      = time.RFC3339            // 2014-03-07T22:31:12.173Z
	minSleep        = 20 * time.Millisecond   // minimum delay between API calls for the pacer
	warnFileSize    = 50000 << 20             // Display warning for files larger than this size
)
// Globals
var (
	// Flags
	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
	uploadWaitPerGB   = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
	// Description of how to auth for this app
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://www.amazon.com/ap/oa",
			TokenURL: "https://api.amazon.com/auth/o2/token",
		},
		// Client credentials are filled in from the config at runtime
		ClientID:     "",
		ClientSecret: "",
		RedirectURL:  oauthutil.RedirectURL,
	}
)
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "amazon cloud drive",
		Description: "Amazon Drive",
		NewFs:       NewFs,
		// Config walks the user through the OAuth token setup
		Config: func(name string) {
			err := oauthutil.Config("amazon cloud drive", name, acdConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Amazon Application Client Id - required.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Amazon Application Client Secret - required.",
		}, {
			Name: config.ConfigAuthURL,
			Help: "Auth server URL - leave blank to use Amazon's.",
		}, {
			Name: config.ConfigTokenURL,
			Help: "Token server url - leave blank to use Amazon's.",
		}},
	})
	flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
2015-11-07 19:14:46 +08:00
// Fs represents a remote acd server
type Fs struct {
	name         string             // name of this remote
	features     *fs.Features       // optional features
	c            *acd.Client        // the connection to the acd server
	noAuthClient *http.Client       // unauthenticated http client
	root         string             // the path we are working on
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *pacer.Pacer       // pacer for API calls
	trueRootID   string             // ID of true root directory
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
}
2015-11-07 19:14:46 +08:00
// Object describes a acd object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs     *Fs       // what this object is part of
	remote string    // The remote path
	info   *acd.Node // Info from the acd object if known
}
// ------------------------------------------------------------
2015-09-23 01:47:16 +08:00
// Name of the remote (as passed into NewFs)
2015-11-07 19:14:46 +08:00
func ( f * Fs ) Name ( ) string {
2015-09-03 06:37:42 +08:00
return f . name
}
2015-09-23 01:47:16 +08:00
// Root of the remote (as passed into NewFs)
2015-11-07 19:14:46 +08:00
func ( f * Fs ) Root ( ) string {
2015-09-03 06:37:42 +08:00
return f . root
}
2015-11-07 19:14:46 +08:00
// String converts this Fs to a string
func ( f * Fs ) String ( ) string {
2016-07-11 19:42:44 +08:00
return fmt . Sprintf ( "amazon drive root '%s'" , f . root )
2015-09-03 06:37:42 +08:00
}
2017-01-14 01:21:47 +08:00
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	optional := f.features
	return optional
}
2015-09-03 06:37:42 +08:00
// Pattern to match a acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return root
}
2015-09-15 04:00:44 +08:00
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	400, // Bad request (seen in "Next token is expired")
	401, // Unauthorized (seen in "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	502, // Bad Gateway when doing big listings
	503, // Service Unavailable
	504, // Gateway Time-out
}
2015-09-12 02:18:41 +08:00
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
2016-05-24 01:03:22 +08:00
func ( f * Fs ) shouldRetry ( resp * http . Response , err error ) ( bool , error ) {
2016-05-28 22:45:39 +08:00
if resp != nil {
if resp . StatusCode == 401 {
2017-02-12 01:49:59 +08:00
f . tokenRenewer . Invalidate ( )
2017-02-09 19:01:20 +08:00
fs . Debugf ( f , "401 error received - invalidating token" )
2016-05-28 22:45:39 +08:00
return true , err
}
// Work around receiving this error sporadically on authentication
//
// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp . StatusCode == 403 && strings . Contains ( err . Error ( ) , "Authorization header requires" ) {
2017-02-10 01:08:51 +08:00
fs . Debugf ( f , "403 \"Authorization header requires...\" error received - retry" )
2016-05-28 22:45:39 +08:00
return true , err
}
2016-05-24 01:03:22 +08:00
}
2018-01-13 00:30:54 +08:00
return fserrors . ShouldRetry ( err ) || fserrors . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
2015-09-12 02:18:41 +08:00
}
2017-11-24 17:08:35 +08:00
// If query parameters contain X-Amz-Algorithm remove Authorization header
//
// This happens when ACD redirects to S3 for the download. The oauth
// transport puts an Authorization header in which we need to remove
// otherwise we get this message from AWS
//
// Only one auth mechanism allowed; only the X-Amz-Algorithm query
// parameter, Signature query string parameter or the Authorization
// header should be specified
func filterRequest(req *http.Request) {
	// Nothing to do unless the S3 presigned-URL marker is present
	if req.URL.Query().Get("X-Amz-Algorithm") == "" {
		return
	}
	fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
	req.Header.Del("Authorization")
}
2015-11-07 19:14:46 +08:00
// NewFs constructs an Fs from the path, container:path
//
// It configures the OAuth client, queries the account endpoints and
// the true root node, then primes the directory cache. If root turns
// out to be a file it returns the parent Fs and fs.ErrorIsFile.
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	baseClient := fshttp.NewClient(fs.Config)
	// Install a request filter (if the transport supports it) so the
	// Authorization header is stripped on redirects to S3 downloads.
	if do, ok := baseClient.Transport.(interface {
		SetRequestFilter(f func(req *http.Request))
	}); ok {
		do.SetRequestFilter(filterRequest)
	} else {
		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
	if err != nil {
		log.Fatalf("Failed to configure Amazon Drive: %v", err)
	}

	c := acd.NewClient(oAuthClient)
	f := &Fs{
		name:         name,
		root:         root,
		c:            c,
		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
		noAuthClient: fshttp.NewClient(fs.Config),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.getRootInfo()
		return err
	})

	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get endpoints")
	}

	// Get rootID
	rootInfo, err := f.getRootInfo()
	if err != nil || rootInfo.Id == nil {
		return nil, errors.Wrap(err, "failed to get root")
	}
	f.trueRootID = *rootInfo.Id

	f.dirCache = dircache.New(root, f.trueRootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := newF.newObjectWithInfo(remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return &newF, fs.ErrorIsFile
	}
	return f, nil
}
2016-08-09 02:05:06 +08:00
// getRootInfo gets the root folder info
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
	err = f.pacer.Call(func() (bool, error) {
		var resp *http.Response
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return f.shouldRetry(resp, err)
	})
	return
}
2016-06-26 04:58:34 +08:00
// Return an Object from a path
2015-09-03 06:37:42 +08:00
//
2016-06-26 04:23:20 +08:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) newObjectWithInfo ( remote string , info * acd . Node ) ( fs . Object , error ) {
2015-11-07 19:14:46 +08:00
o := & Object {
fs : f ,
2015-09-03 06:37:42 +08:00
remote : remote ,
}
if info != nil {
// Set info but not meta
o . info = info
} else {
err := o . readMetaData ( ) // reads info and meta, returning an error
if err != nil {
2016-06-26 04:23:20 +08:00
return nil , err
2015-09-03 06:37:42 +08:00
}
}
2016-06-26 04:23:20 +08:00
return o , nil
2015-09-03 06:37:42 +08:00
}
2016-06-26 04:23:20 +08:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) NewObject ( remote string ) ( fs . Object , error ) {
2016-06-26 04:58:34 +08:00
return f . newObjectWithInfo ( remote , nil )
2015-09-03 06:37:42 +08:00
}
2015-09-23 01:47:16 +08:00
// FindLeaf finds a directory of name leaf in the folder with ID pathID
//
// Returns ("", false, nil) when the leaf does not exist or is not yet
// in the AVAILABLE state.
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
	folder := acd.FolderFromId(pathID, f.c.Nodes)
	var resp *http.Response
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		subFolder, resp, err = folder.GetFolder(leaf)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {
			//fs.Debugf(f, "...Not found")
			return "", false, nil
		}
		//fs.Debugf(f, "...Error %v", err)
		return "", false, err
	}
	// A folder that exists but is not AVAILABLE is treated as not found
	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
		fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
		return "", false, nil
	}
	//fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf)
	return *subFolder.Id, true, nil
}
2015-09-23 01:47:16 +08:00
// CreateDir makes a directory with pathID as parent and name leaf
2015-11-07 19:14:46 +08:00
func ( f * Fs ) CreateDir ( pathID , leaf string ) ( newID string , err error ) {
2015-09-23 01:47:16 +08:00
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd . FolderFromId ( pathID , f . c . Nodes )
2015-09-12 02:18:41 +08:00
var resp * http . Response
var info * acd . Folder
err = f . pacer . Call ( func ( ) ( bool , error ) {
info , resp , err = folder . CreateFolder ( leaf )
2016-05-24 01:03:22 +08:00
return f . shouldRetry ( resp , err )
2015-09-12 02:18:41 +08:00
} )
2015-09-03 06:37:42 +08:00
if err != nil {
2015-09-12 02:18:41 +08:00
//fmt.Printf("...Error %v\n", err)
2015-09-03 06:37:42 +08:00
return "" , err
}
2015-09-12 02:18:41 +08:00
//fmt.Printf("...Id %q\n", *info.Id)
2015-09-03 06:37:42 +08:00
return * info . Id , nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Nodes are buffered up and only sent to fn after the whole (paged)
// listing has completed successfully.
//
// NOTE(review): the title parameter is unused here - confirm against
// callers before removing.
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	query := "parents:" + dirID
	if directoriesOnly {
		query += " AND kind:" + folderKind
	} else if filesOnly {
		query += " AND kind:" + fileKind
	} else {
		// FIXME none of these work
		//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
		//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
	}
	opts := acd.NodeListOptions{
		Filters: query,
	}
	var nodes []*acd.Node
	var out []*acd.Node
	//var resp *http.Response
	// GetNodes pages through results; loop until it returns nil
	for {
		var resp *http.Response
		err = f.pacer.CallNoRetry(func() (bool, error) {
			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
			return f.shouldRetry(resp, err)
		})
		if err != nil {
			return false, err
		}
		if nodes == nil {
			break
		}
		for _, node := range nodes {
			if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
				// Ignore nodes if not AVAILABLE
				if *node.Status != statusAvailable {
					continue
				}
				// Ignore bogus nodes Amazon Drive sometimes reports
				hasValidParent := false
				for _, parent := range node.Parents {
					if parent == dirID {
						hasValidParent = true
						break
					}
				}
				if !hasValidParent {
					continue
				}
				// Store the nodes up in case we have to retry the listing
				out = append(out, node)
			}
		}
	}
	// Send the nodes now
	for _, node := range out {
		if fn(node) {
			found = true
			break
		}
	}
	return
}
2017-06-12 05:43:31 +08:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func ( f * Fs ) List ( dir string ) ( entries fs . DirEntries , err error ) {
err = f . dirCache . FindRoot ( false )
if err != nil {
return nil , err
}
directoryID , err := f . dirCache . FindDir ( dir , false )
if err != nil {
return nil , err
}
2016-05-15 00:15:42 +08:00
maxTries := fs . Config . LowLevelRetries
2017-06-12 05:43:31 +08:00
var iErr error
2016-05-15 00:15:42 +08:00
for tries := 1 ; tries <= maxTries ; tries ++ {
2017-06-12 05:43:31 +08:00
entries = nil
_ , err = f . listAll ( directoryID , "" , false , false , func ( node * acd . Node ) bool {
remote := path . Join ( dir , * node . Name )
2016-05-15 00:15:42 +08:00
switch * node . Kind {
case folderKind :
2017-06-12 05:43:31 +08:00
// cache the directory ID for later lookups
f . dirCache . Put ( remote , * node . Id )
2017-06-30 20:37:29 +08:00
when , _ := time . Parse ( timeFormat , * node . ModifiedDate ) // FIXME
2017-08-02 23:44:36 +08:00
d := fs . NewDir ( remote , when ) . SetID ( * node . Id )
2017-06-12 05:43:31 +08:00
entries = append ( entries , d )
2016-05-15 00:15:42 +08:00
case fileKind :
2016-06-26 04:23:20 +08:00
o , err := f . newObjectWithInfo ( remote , node )
if err != nil {
2017-06-12 05:43:31 +08:00
iErr = err
2016-06-26 04:23:20 +08:00
return true
2016-04-22 03:06:21 +08:00
}
2017-06-12 05:43:31 +08:00
entries = append ( entries , o )
2016-05-15 00:15:42 +08:00
default :
// ignore ASSET etc
2015-09-03 06:37:42 +08:00
}
2016-05-15 00:15:42 +08:00
return false
} )
2017-06-12 05:43:31 +08:00
if iErr != nil {
return nil , iErr
}
2018-01-13 00:30:54 +08:00
if fserrors . IsRetryError ( err ) {
2017-06-12 05:43:31 +08:00
fs . Debugf ( f , "Directory listing error for %q: %v - low level retry %d/%d" , dir , err , tries , maxTries )
2016-05-15 00:15:42 +08:00
continue
2015-09-03 06:37:42 +08:00
}
2016-05-15 00:15:42 +08:00
if err != nil {
return nil , err
}
break
}
2017-06-12 05:43:31 +08:00
return entries , nil
2015-09-03 06:37:42 +08:00
}
2016-08-06 06:42:49 +08:00
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
// * 408 REQUEST_TIMEOUT
// * 504 GATEWAY_TIMEOUT
// * 500 Internal server error
//
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
//
// NOTE(review): the uploadTime parameter is currently unused - confirm
// against callers before removing.
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }
	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}
	// check to see if we read to the end - a single extra Read must hit EOF
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}
	// Don't wait for uploads - assume they will appear later
	if *uploadWaitPerGB <= 0 {
		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}
	// Time we should wait for the upload, scaled by the file size
	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up
	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	// Poll for the object appearing with the right size
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debugf(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
2015-09-03 06:37:42 +08:00
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Check if object already exists
	err := o.readMetaData()
	switch err {
	case nil:
		// Object exists - update it in place instead of creating a new one
		return o, o.Update(in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
	default:
		return nil, err
	}
	// If not create it
	leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
	if err != nil {
		return nil, err
	}
	if size > warnFileSize {
		fs.Logf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var info *acd.File
	var resp *http.Response
	err = f.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// Keep the oauth token fresh during (potentially long) uploads
		f.tokenRenewer.Start()
		info, resp, err = folder.Put(in, leaf)
		f.tokenRenewer.Stop()
		var ok bool
		// Check whether the upload actually succeeded despite the error
		// (Amazon sometimes reports failure for completed uploads)
		ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}
// Mkdir creates the container if it doesn't exist
2016-11-26 05:52:43 +08:00
func ( f * Fs ) Mkdir ( dir string ) error {
err := f . dirCache . FindRoot ( true )
if err != nil {
return err
}
if dir != "" {
_ , err = f . dirCache . FindDir ( dir , true )
}
return err
2015-09-03 06:37:42 +08:00
}
2016-11-01 20:34:47 +08:00
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// create the destination directory if necessary
	err := f.dirCache.FindRoot(true)
	if err != nil {
		return nil, err
	}
	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
	if err != nil {
		return nil, err
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
	if err != nil {
		return nil, err
	}
	err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}

	// Wait for directory caching so we can no longer see the old
	// object and see the new object
	time.Sleep(200 * time.Millisecond) // enough time 90% of the time
	var (
		dstObj         fs.Object
		srcErr, dstErr error
	)
	// Poll until the move is visible: src gone and dst present
	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
		_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
			// exit if error on source
			return nil, srcErr
		}
		dstObj, dstErr = f.NewObject(remote)
		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
			// exit if error on dst
			return nil, dstErr
		}
		if srcErr == fs.ErrorObjectNotFound && dstErr == nil {
			// finished if src not found and dst found
			break
		}
		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
		time.Sleep(1 * time.Second)
	}
	return dstObj, dstErr
}
2016-12-09 23:39:29 +08:00
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
2016-11-01 20:34:47 +08:00
2017-02-06 05:20:56 +08:00
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "DirMove error: not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err = srcFs.dirCache.FindRoot(false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(true)
		if err != nil {
			return err
		}
	} else {
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src parent
	findPath = srcRemote
	var srcDirectoryID string
	if srcRemote == "" {
		// moving the root itself - parent comes from the cache
		srcDirectoryID, err = srcFs.dirCache.RootParentID()
	} else {
		_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
	}
	if err != nil {
		return err
	}
	srcLeaf, _ := dircache.SplitPath(srcPath)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
	if err != nil {
		return err
	}

	// FIXME make a proper node.UpdateMetadata command
	srcInfo := acd.NodeFromId(srcID, f.c.Nodes)
	var jsonStr string
	err = srcFs.pacer.Call(func() (bool, error) {
		jsonStr, err = srcInfo.GetMetadata()
		return srcFs.shouldRetry(nil, err)
	})
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
		return err
	}
	// Populate srcInfo from the fetched metadata before moving it
	err = json.Unmarshal([]byte(jsonStr), &srcInfo)
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err)
		return err
	}

	err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	if err != nil {
		return err
	}

	// Invalidate the source directory cache now the move has happened
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
2015-09-03 06:37:42 +08:00
// purgeCheck remotes the root directory, if check is set then it
// refuses to do so if it has anything in
2016-11-26 05:52:43 +08:00
func ( f * Fs ) purgeCheck ( dir string , check bool ) error {
root := path . Join ( f . root , dir )
if root == "" {
2016-06-12 22:06:02 +08:00
return errors . New ( "can't purge root directory" )
2015-09-03 06:37:42 +08:00
}
dc := f . dirCache
2017-01-15 20:18:07 +08:00
err := dc . FindRoot ( false )
if err != nil {
return err
}
2016-11-26 05:52:43 +08:00
rootID , err := dc . FindDir ( dir , false )
2015-09-03 06:37:42 +08:00
if err != nil {
return err
}
if check {
// check directory is empty
empty := true
2015-10-05 05:08:31 +08:00
_ , err = f . listAll ( rootID , "" , false , false , func ( node * acd . Node ) bool {
2015-09-03 06:37:42 +08:00
switch * node . Kind {
case folderKind :
empty = false
return true
case fileKind :
empty = false
return true
default :
2017-02-09 19:01:20 +08:00
fs . Debugf ( "Found ASSET %s" , * node . Id )
2015-09-03 06:37:42 +08:00
}
return false
} )
if err != nil {
return err
}
if ! empty {
2016-06-12 22:06:02 +08:00
return errors . New ( "directory not empty" )
2015-09-03 06:37:42 +08:00
}
}
node := acd . NodeFromId ( rootID , f . c . Nodes )
2015-09-10 06:23:37 +08:00
var resp * http . Response
2015-09-12 02:18:41 +08:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
resp , err = node . Trash ( )
2016-05-24 01:03:22 +08:00
return f . shouldRetry ( resp , err )
2015-09-12 02:18:41 +08:00
} )
2015-09-03 06:37:42 +08:00
if err != nil {
return err
}
2016-11-26 05:52:43 +08:00
f . dirCache . FlushDir ( dir )
2015-09-03 06:37:42 +08:00
if err != nil {
return err
}
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
2016-11-26 05:52:43 +08:00
func ( f * Fs ) Rmdir ( dir string ) error {
return f . purgeCheck ( dir , true )
2015-09-03 06:37:42 +08:00
}
2015-09-23 01:47:16 +08:00
// Precision return the precision of this Fs
2015-11-07 19:14:46 +08:00
func ( f * Fs ) Precision ( ) time . Duration {
2015-09-03 06:37:42 +08:00
return fs . ModTimeNotSupported
}
2016-01-11 20:39:33 +08:00
// Hashes returns the supported hash sets.
2018-01-13 00:30:54 +08:00
func ( f * Fs ) Hashes ( ) hash . Set {
2018-01-19 04:27:52 +08:00
return hash . Set ( hash . MD5 )
2016-01-11 20:39:33 +08:00
}
2015-09-03 06:37:42 +08:00
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
2015-11-07 19:14:46 +08:00
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
2015-09-03 06:37:42 +08:00
// if !ok {
2017-02-09 19:01:20 +08:00
// fs.Debugf(src, "Can't copy - not same remote type")
2015-09-03 06:37:42 +08:00
// return nil, fs.ErrorCantCopy
// }
2015-11-07 19:14:46 +08:00
// srcFs := srcObj.fs
2015-09-03 06:37:42 +08:00
// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
// if err != nil {
// return nil, err
// }
2016-06-26 04:58:34 +08:00
// return f.NewObject(remote), nil
2015-09-03 06:37:42 +08:00
//}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
2015-11-07 19:14:46 +08:00
func ( f * Fs ) Purge ( ) error {
2016-11-26 05:52:43 +08:00
return f . purgeCheck ( "" , false )
2015-09-03 06:37:42 +08:00
}
// ------------------------------------------------------------
2015-09-23 01:47:16 +08:00
// Fs returns the parent Fs
2016-02-18 19:35:25 +08:00
func ( o * Object ) Fs ( ) fs . Info {
2015-11-07 19:14:46 +08:00
return o . fs
2015-09-03 06:37:42 +08:00
}
// Return a string version
2015-11-07 19:14:46 +08:00
func ( o * Object ) String ( ) string {
2015-09-03 06:37:42 +08:00
if o == nil {
return "<nil>"
}
return o . remote
}
2015-09-23 01:47:16 +08:00
// Remote returns the remote path
2015-11-07 19:14:46 +08:00
func ( o * Object ) Remote ( ) string {
2015-09-03 06:37:42 +08:00
return o . remote
}
2016-01-11 20:39:33 +08:00
// Hash returns the Md5sum of an object returning a lowercase hex string
2018-01-13 00:30:54 +08:00
func ( o * Object ) Hash ( t hash . Type ) ( string , error ) {
2018-01-19 04:27:52 +08:00
if t != hash . MD5 {
return "" , hash . ErrUnsupported
2016-01-11 20:39:33 +08:00
}
2016-12-01 05:05:35 +08:00
if o . info . ContentProperties != nil && o . info . ContentProperties . Md5 != nil {
2015-09-03 06:37:42 +08:00
return * o . info . ContentProperties . Md5 , nil
}
return "" , nil
}
// Size returns the size of an object in bytes
2015-11-07 19:14:46 +08:00
func ( o * Object ) Size ( ) int64 {
2016-12-01 05:05:35 +08:00
if o . info . ContentProperties != nil && o . info . ContentProperties . Size != nil {
return int64 ( * o . info . ContentProperties . Size )
}
return 0 // Object is likely PENDING
2015-09-03 06:37:42 +08:00
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData() (err error) {
	// Already fetched - nothing to do.
	if o.info != nil {
		return nil
	}
	// Resolve the parent directory ID and leaf name without creating
	// anything on the remote.
	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
	if err != nil {
		// A missing parent directory means the object can't exist.
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var resp *http.Response
	var info *acd.File
	// Look the file up by leaf name under its parent, retrying through
	// the pacer on transient errors.
	err = o.fs.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(leaf)
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		// Map the acd-level not-found error to the fs-level one which
		// callers test for.
		if err == acd.ErrorNodeNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	o.info = info.Node
	return nil
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
2015-11-07 19:14:46 +08:00
func ( o * Object ) ModTime ( ) time . Time {
2015-09-03 06:37:42 +08:00
err := o . readMetaData ( )
if err != nil {
2017-02-10 01:08:51 +08:00
fs . Debugf ( o , "Failed to read metadata: %v" , err )
2015-09-03 06:37:42 +08:00
return time . Now ( )
}
modTime , err := time . Parse ( timeFormat , * o . info . ModifiedDate )
if err != nil {
2017-02-10 01:08:51 +08:00
fs . Debugf ( o , "Failed to read mtime from object: %v" , err )
2015-09-03 06:37:42 +08:00
return time . Now ( )
}
return modTime
}
2015-09-23 01:47:16 +08:00
// SetModTime sets the modification time of the local fs object
2016-03-22 23:07:10 +08:00
func ( o * Object ) SetModTime ( modTime time . Time ) error {
2015-09-03 06:37:42 +08:00
// FIXME not implemented
2016-03-22 23:07:10 +08:00
return fs . ErrorCantSetModTime
2015-09-03 06:37:42 +08:00
}
2015-09-23 01:47:16 +08:00
// Storable returns a boolean showing whether this object storable
2015-11-07 19:14:46 +08:00
func ( o * Object ) Storable ( ) bool {
2015-09-03 06:37:42 +08:00
return true
}
// Open an object for read.
//
// Objects at or above tempLinkThreshold are fetched via a temporary
// download link using the no-auth client, as large downloads through
// the authenticated endpoint are unreliable.
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bigObject := o.Size() >= int64(tempLinkThreshold)
	if bigObject {
		fs.Debugf(o, "Downloading large object via tempLink")
	}
	file := acd.File{Node: o.info}
	var resp *http.Response
	// Translate the open options (e.g. Range requests) into HTTP headers.
	headers := fs.OpenOptionHeaders(options)
	err = o.fs.pacer.Call(func() (bool, error) {
		if !bigObject {
			in, resp, err = file.OpenHeaders(headers)
		} else {
			// The header-resetting client stops auth headers leaking to
			// the (unauthenticated) temporary URL host.
			in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
		}
		return o.fs.shouldRetry(resp, err)
	})
	return in, err
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	file := acd.File{Node: o.info}
	var info *acd.File
	var resp *http.Response
	var err error
	// CallNoRetry because retrying the body read is handled by
	// checkUpload below, not by the pacer.
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// Keep the OAuth token fresh for the duration of what may be a
		// very long upload.
		o.fs.tokenRenewer.Start()
		info, resp, err = file.Overwrite(in)
		o.fs.tokenRenewer.Stop()
		var ok bool
		// The upload may have succeeded on the server even if the HTTP
		// request appeared to fail - checkUpload waits and looks for
		// the file appearing before declaring failure.
		ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	o.info = info.Node
	return nil
}
2016-12-10 00:56:07 +08:00
// Remove a node
func ( f * Fs ) removeNode ( info * acd . Node ) error {
2015-09-12 02:18:41 +08:00
var resp * http . Response
var err error
2016-12-10 00:56:07 +08:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
resp , err = info . Trash ( )
return f . shouldRetry ( resp , err )
2015-09-12 02:18:41 +08:00
} )
2015-09-03 06:37:42 +08:00
return err
}
2016-12-10 00:56:07 +08:00
// Remove an object
func ( o * Object ) Remove ( ) error {
return o . fs . removeNode ( o . info )
}
// Restore a node
2017-01-14 17:50:45 +08:00
func ( f * Fs ) restoreNode ( info * acd . Node ) ( newInfo * acd . Node , err error ) {
2016-11-08 04:38:57 +08:00
var resp * http . Response
2016-12-10 00:56:07 +08:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
2017-01-14 17:50:45 +08:00
newInfo , resp , err = info . Restore ( )
2016-12-10 00:56:07 +08:00
return f . shouldRetry ( resp , err )
2016-11-01 20:34:47 +08:00
} )
2017-01-14 17:50:45 +08:00
return newInfo , err
2016-11-01 20:34:47 +08:00
}
2016-12-10 00:56:07 +08:00
// Changes name of given node
2017-01-14 17:50:45 +08:00
func ( f * Fs ) renameNode ( info * acd . Node , newName string ) ( newInfo * acd . Node , err error ) {
2016-11-01 20:34:47 +08:00
var resp * http . Response
2016-12-10 00:56:07 +08:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
2017-01-14 17:50:45 +08:00
newInfo , resp , err = info . Rename ( newName )
2016-12-10 00:56:07 +08:00
return f . shouldRetry ( resp , err )
2016-11-01 20:34:47 +08:00
} )
2017-01-14 17:50:45 +08:00
return newInfo , err
2016-11-01 20:34:47 +08:00
}
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
2016-12-10 00:56:07 +08:00
func ( f * Fs ) replaceParent ( info * acd . Node , oldParentID string , newParentID string ) error {
return f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := info . ReplaceParent ( oldParentID , newParentID )
return f . shouldRetry ( resp , err )
2016-11-01 20:34:47 +08:00
} )
}
// Adds one additional parent to object.
2016-12-10 00:56:07 +08:00
func ( f * Fs ) addParent ( info * acd . Node , newParentID string ) error {
return f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := info . AddParent ( newParentID )
return f . shouldRetry ( resp , err )
2016-11-01 20:34:47 +08:00
} )
}
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
2016-12-10 00:56:07 +08:00
func ( f * Fs ) removeParent ( info * acd . Node , parentID string ) error {
return f . pacer . Call ( func ( ) ( bool , error ) {
resp , err := info . RemoveParent ( parentID )
return f . shouldRetry ( resp , err )
2016-11-01 20:34:47 +08:00
} )
}
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
//
// It first tries the quick path of rename + parent replace.  If the
// rename conflicts (typically a name collision), it falls back to a
// trash / rename / re-parent / restore sequence, because renames of
// trashed nodes don't hit the conflict.  If any step of that fallback
// fails the node can be left in an intermediate state.
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
	// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
	cantMove := fs.ErrorCantMove
	if useDirErrorMsgs {
		cantMove = fs.ErrorCantDirMove
	}
	// A rename applies in every parent, so refuse to rename a
	// multi-parent node - it would change the name everywhere.
	if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf {
		fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
		return cantMove
	}
	if srcLeaf != dstLeaf {
		// fs.Debugf(name, "renaming")
		_, err = f.renameNode(srcInfo, dstLeaf)
		if err != nil {
			fs.Debugf(name, "Move: quick path rename failed: %v", err)
			goto OnConflict
		}
	}
	if srcDirectoryID != dstDirectoryID {
		// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
		err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
		if err != nil {
			fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
			return err
		}
	}
	return nil

OnConflict:
	fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

	// fs.Debugf(name, "Trashing file")
	err = f.removeNode(srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: remove node failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Renaming file")
	_, err = f.renameNode(srcInfo, dstLeaf)
	if err != nil {
		fs.Debugf(name, "Move: rename node failed: %v", err)
		return err
	}
	// note: replacing parent is forbidden by API, modifying them individually is
	// okay though
	// fs.Debugf(name, "Adding target parent")
	err = f.addParent(srcInfo, dstDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: addParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "removing original parent")
	err = f.removeParent(srcInfo, srcDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: removeParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Restoring")
	_, err = f.restoreNode(srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: restoreNode node failed: %v", err)
		return err
	}
	return nil
}
2016-09-22 05:13:24 +08:00
// MimeType of an Object if known, "" otherwise
func ( o * Object ) MimeType ( ) string {
2016-12-01 05:05:35 +08:00
if o . info . ContentProperties != nil && o . info . ContentProperties . ContentType != nil {
2016-09-22 05:13:24 +08:00
return * o . info . ContentProperties . ContentType
}
return ""
}
2018-03-09 04:03:34 +08:00
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
2017-10-17 03:54:53 +08:00
//
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
2018-03-09 04:03:34 +08:00
func ( f * Fs ) ChangeNotify ( notifyFunc func ( string , fs . EntryType ) , pollInterval time . Duration ) chan bool {
2017-10-17 03:54:53 +08:00
checkpoint := config . FileGet ( f . name , "checkpoint" )
quit := make ( chan bool )
go func ( ) {
for {
2018-03-09 04:03:34 +08:00
checkpoint = f . changeNotifyRunner ( notifyFunc , checkpoint )
2017-10-17 03:54:53 +08:00
if err := config . SetValueAndSave ( f . name , "checkpoint" , checkpoint ) ; err != nil {
fs . Debugf ( f , "Unable to save checkpoint: %v" , err )
}
select {
case <- quit :
return
case <- time . After ( pollInterval ) :
}
}
} ( )
return quit
}
// changeNotifyRunner polls the changes feed once from the given
// checkpoint, calling notifyFunc for each changed path it can map into
// this remote, and returns the new checkpoint to resume from.
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
	var err error
	var resp *http.Response
	var reachedEnd bool
	var csCount int
	var nodeCount int
	fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
			Checkpoint:    checkpoint,
			IncludePurged: true,
		}, func(changeSet *acd.ChangeSet, err error) error {
			if err != nil {
				return err
			}

			// Collect the paths to notify for this change set along
			// with whether each is a file or a directory.
			type entryType struct {
				path      string
				entryType fs.EntryType
			}
			var pathsToClear []entryType
			csCount++
			nodeCount += len(changeSet.Nodes)
			if changeSet.End {
				reachedEnd = true
			}
			if changeSet.Checkpoint != "" {
				checkpoint = changeSet.Checkpoint
			}
			for _, node := range changeSet.Nodes {
				// If the node itself is in the dir cache we know its
				// path directly.
				if path, ok := f.dirCache.GetInv(*node.Id); ok {
					if node.IsFile() {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
					} else {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
					}
					continue
				}
				if node.IsFile() {
					// translate the parent dir of this object
					if len(node.Parents) > 0 {
						if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
							// and append the drive file name to compute the full file name
							if len(path) > 0 {
								path = path + "/" + *node.Name
							} else {
								path = *node.Name
							}
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
						}
					} else { // a true root object that is changed
						pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
					}
				}
			}

			// Notify each distinct path only once per change set.
			visitedPaths := make(map[string]bool)
			for _, entry := range pathsToClear {
				if _, ok := visitedPaths[entry.path]; ok {
					continue
				}
				visitedPaths[entry.path] = true
				notifyFunc(entry.path, entry.entryType)
			}
			return nil
		})
		return false, err
	})
	fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)
	// io.ErrUnexpectedEOF marks the normal end of the changes stream.
	if err != nil && err != io.ErrUnexpectedEOF {
		fs.Debugf(f, "Failed to get Changes: %v", err)
		return checkpoint
	}
	if reachedEnd {
		reachedEnd = false
		fs.Debugf(f, "All changes were processed. Waiting for more.")
	} else if checkpoint == "" {
		fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
	}
	return checkpoint
}
2015-09-03 06:37:42 +08:00
// Check the interfaces are satisfied
var (
2015-11-07 19:14:46 +08:00
_ fs . Fs = ( * Fs ) ( nil )
_ fs . Purger = ( * Fs ) ( nil )
// _ fs.Copier = (*Fs)(nil)
2018-03-09 04:03:34 +08:00
_ fs . Mover = ( * Fs ) ( nil )
_ fs . DirMover = ( * Fs ) ( nil )
_ fs . DirCacheFlusher = ( * Fs ) ( nil )
_ fs . ChangeNotifier = ( * Fs ) ( nil )
_ fs . Object = ( * Object ) ( nil )
_ fs . MimeTyper = & Object { }
2015-09-03 06:37:42 +08:00
)