// Package box provides an interface to the Box
// object storage system.
package box
// FIXME Box only supports file names of 255 characters or less. Names
// that will not be supported are those that contain non-printable
// ascii, / or \, names with trailing spaces, and the special names
// “.” and “..”.
// FIXME box can copy a directory
import (
	"context"
	"crypto/rsa"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang-jwt/jwt/v4"

	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/jwtutil"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/rest"
	"github.com/youmark/pkcs8"
)
const (
	rcloneClientID              = "d0374ba6pgmaguie02ge15sv1mllndho"
	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
	minSleep                    = 10 * time.Millisecond
	maxSleep                    = 2 * time.Second
	decayConstant               = 2 // bigger for slower decay, exponential
	rootURL                     = "https://api.box.com/2.0"
	uploadURL                   = "https://upload.box.com/api/2.0"
	minUploadCutoff             = 50000000 // upload cutoff can be no lower than this
	defaultUploadCutoff         = 50 * 1024 * 1024
	tokenURL                    = "https://api.box.com/oauth2/token"
)
// Globals
var (
// Description of how to auth for this app
	oauthConfig = &oauthutil.Config{
		Scopes:       nil,
		AuthURL:      "https://app.box.com/api/oauth2/authorize",
		TokenURL:     "https://app.box.com/api/oauth2/token",
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
)
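
// boxCustomClaims is the claim set sent to Box when requesting a JWT token,
// adding the Box specific box_sub_type field to the standard JWT claims.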
type boxCustomClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "box",
		Description: "Box",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			jsonFile, ok := m.Get("box_config_file")
			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
			boxAccessToken, boxAccessTokenOk := m.Get("access_token")
			var err error
			// If using box config.json, use JWT auth
			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
				if err != nil {
					return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
				}
				// Else, if not using an access token, use oauth2
			} else if boxAccessToken == "" || !boxAccessTokenOk {
				return oauthutil.ConfigOut("", &oauthutil.Options{
					OAuth2Config: oauthConfig,
				})
			}
			return nil, nil
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:      "root_folder_id",
			Help:      "Fill in for rclone to use a non root folder as its starting point.",
			Default:   "0",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "box_config_file",
			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
		}, {
			Name:      "access_token",
			Help:      "Box App Primary Access Token\n\nLeave blank normally.",
			Sensitive: true,
		}, {
			Name:    "box_sub_type",
			Default: "user",
			Examples: []fs.OptionExample{{
				Value: "user",
				Help:  "Rclone should act on behalf of a user.",
			}, {
				Value: "enterprise",
				Help:  "Rclone should act on behalf of a service account.",
			}},
		}, {
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to multipart upload (>= 50 MiB).",
			Default:  fs.SizeSuffix(defaultUploadCutoff),
			Advanced: true,
		}, {
			Name:     "commit_retries",
			Help:     "Max number of times to try committing a multipart file.",
			Default:  100,
			Advanced: true,
		}, {
			Name:     "list_chunk",
			Default:  1000,
			Help:     "Size of listing chunk 1-1000.",
			Advanced: true,
		}, {
			Name:     "owned_by",
			Default:  "",
			Help:     "Only show items owned by the login (email address) passed in.",
			Advanced: true,
		}, {
			Name:    "impersonate",
			Default: "",
			Help: `Impersonate this user ID when using a service account.

Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.

The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.

See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// From https://developer.box.com/docs/error-codes#section-400-bad-request :
			// > Box only supports file or folder names that are 255 characters or less.
			// > File names containing non-printable ascii, "/" or "\", names with leading
			// > or trailing spaces, and the special names “.” and “..” are also unsupported.
			//
			// Testing revealed names with leading spaces work fine.
			// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
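
// The following is an illustrative sketch only of how a JWT-authenticated
// Box remote might look in rclone.conf using the options registered above
// (the remote name and config.json path are made up):
//
//	[mybox]
//	type = box
//	box_config_file = /path/to/box-app-config.json
//	box_sub_type = enterprise
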
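// refreshJWTToken uses the Box App config.json to perform JWT authentication
// and saves the resulting token in the config.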
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
	jsonFile = env.ShellExpand(jsonFile)
	boxConfig, err := getBoxConfig(jsonFile)
	if err != nil {
		return fmt.Errorf("get box config: %w", err)
	}
	privateKey, err := getDecryptedPrivateKey(boxConfig)
	if err != nil {
		return fmt.Errorf("get decrypted private key: %w", err)
	}
	claims, err := getClaims(boxConfig, boxSubType)
	if err != nil {
		return fmt.Errorf("get claims: %w", err)
	}
	signingHeaders := getSigningHeaders(boxConfig)
	queryParams := getQueryParams(boxConfig)
	client := fshttp.NewClient(ctx)
	err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
	return err
}
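
// getBoxConfig reads and parses the Box App config.json file.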
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
	file, err := os.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
	}
	err = json.Unmarshal(file, &boxConfig)
	if err != nil {
		return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
	}
	return boxConfig, nil
}
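
// getClaims constructs the JWT claims for the Box token request, using a
// random jti and a 45 second expiry.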
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
	val, err := jwtutil.RandomHex(20)
	if err != nil {
		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
	}

	claims = &boxCustomClaims{
		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
		StandardClaims: jwt.StandardClaims{
			Id:        val,
			Issuer:    boxConfig.BoxAppSettings.ClientID,
			Subject:   boxConfig.EnterpriseID,
			Audience:  tokenURL,
			ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
		},
		BoxSubType: boxSubType,
	}
	return claims, nil
}
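
// getSigningHeaders returns the JWT signing headers containing the ID of the
// public key registered with the Box App.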
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
	signingHeaders := map[string]interface{}{
		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
	}
	return signingHeaders
}
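
// getQueryParams returns the client_id and client_secret from the Box App
// config as query parameters for the token request.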
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
	queryParams := map[string]string{
		"client_id":     boxConfig.BoxAppSettings.ClientID,
		"client_secret": boxConfig.BoxAppSettings.ClientSecret,
	}
	return queryParams
}
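
// getDecryptedPrivateKey decodes the PEM encoded private key from the Box App
// config and decrypts it with the configured passphrase.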
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
	if len(rest) > 0 {
		return nil, errors.New("box: extra data included in private key")
	}
	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
	if err != nil {
		return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
	}
	return rsaKey.(*rsa.PrivateKey), nil
}
// Options defines the configuration for this backend
type Options struct {
	UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
	CommitRetries int                  `config:"commit_retries"`
	Enc           encoder.MultiEncoder `config:"encoding"`
	RootFolderID  string               `config:"root_folder_id"`
	AccessToken   string               `config:"access_token"`
	ListChunk     int                  `config:"list_chunk"`
	OwnedBy       string               `config:"owned_by"`
	Impersonate   string               `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
}
// Fs represents a remote box
type Fs struct {
	name            string                // name of this remote
	root            string                // the path we are working on
	opt             Options               // parsed options
	features        *fs.Features          // optional features
	srv             *rest.Client          // the connection to the server
	dirCache        *dircache.DirCache    // Map of directory path to directory id
	pacer           *fs.Pacer             // pacer for API calls
	tokenRenewer    *oauthutil.Renew      // renew the token on expiry
	uploadToken     *pacer.TokenDispenser // control concurrency
	itemMetaCacheMu *sync.Mutex           // protects itemMetaCache
	itemMetaCache   map[string]ItemMeta   // map of Item ID to selected metadata
}
// Object describes a box object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	hasMetaData bool      // whether info below has been set
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	id          string    // ID of the object
	publicLink  string    // Public Link for the object
	sha1        string    // SHA-1 of the object content
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("box root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a box 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	authRetry := false

	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
		authRetry = true
		fs.Debugf(nil, "Should retry: %v", err)
	}

	// Box API errors which should be retried
	if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" {
		fs.Debugf(nil, "Retrying API error %v", err)
		return true, err
	}

	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
	// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
	leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	// Use preupload to find the ID
	itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
	if err != nil {
		return nil, err
	}
	if itemMini == nil {
		return nil, fs.ErrorObjectNotFound
	}

	// Now we have the ID we can look up the object proper
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/files/" + itemMini.ID,
		Parameters: fieldsValue(),
	}
	var item api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return &item, nil
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	if errResponse.Code == "" {
		errResponse.Code = resp.Status
	}
	if errResponse.Status == 0 {
		errResponse.Status = resp.StatusCode
	}
	return errResponse
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.UploadCutoff < minUploadCutoff {
		return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than or equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
	}

	root = parsePath(root)

	client := fshttp.NewClient(ctx)
	var ts *oauthutil.TokenSource
	// If not using an accessToken, create an oauth client and tokensource
	if opt.AccessToken == "" {
		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
		if err != nil {
			return nil, fmt.Errorf("failed to configure Box: %w", err)
		}
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:            name,
		root:            root,
		opt:             *opt,
		srv:             rest.NewClient(client).SetRoot(rootURL),
		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken:     pacer.NewTokenDispenser(ci.Transfers),
		itemMetaCacheMu: new(sync.Mutex),
		itemMetaCache:   make(map[string]ItemMeta),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// If using an accessToken, set the Authorization header
	if f.opt.AccessToken != "" {
		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
	}

	// If using impersonate set an as-user header
	if f.opt.Impersonate != "" {
		f.srv.SetHeader("as-user", f.opt.Impersonate)
	}

	jsonFile, ok := m.Get("box_config_file")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

	if ts != nil {
		// If using box config.json and JWT, renewing should just refresh the token and
		// should do so whether there are uploads pending or not.
		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
				err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
				return err
			})
			f.tokenRenewer.Start()
		} else {
			// Renew the token in the background
			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
				_, err := f.readMetaDataForPath(ctx, "")
				return err
			})
		}
	}

	// Get rootFolderID
	rootID := f.opt.RootFolderID
	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(ctx, &tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
	if f.root == "" {
		return f.root
	}
	return f.root + "/"
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool {
		if strings.EqualFold(item.Name, leaf) {
			pathIDOut = item.ID
			return true
		}
		return false
	})
	return pathIDOut, found, err
}
// fieldsValue creates a url.Values with fields set to those in api.Item
func fieldsValue() url.Values {
	values := url.Values{}
	values.Set("fields", api.ItemFields)
	return values
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	var info *api.Item
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/folders",
		Parameters: fieldsValue(),
	}
	mkdir := api.CreateFolder{
		Name: f.opt.Enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: pathID,
		},
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		// fmt.Printf("...Error %v\n", err)
		return "", err
	}
	// fmt.Printf("...Id %q\n", *info.Id)
	return info.ID, nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/folders/" + dirID + "/items",
		Parameters: fieldsValue(),
	}
	opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk))
	opts.Parameters.Set("usemarker", "true")
	var marker *string
OUTER:
	for {
		if marker != nil {
			opts.Parameters.Set("marker", *marker)
		}

		var result api.FolderItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return found, fmt.Errorf("couldn't list files: %w", err)
		}
		for i := range result.Entries {
			item := &result.Entries[i]
			if item.Type == api.ItemTypeFolder {
				if filesOnly {
					continue
				}
			} else if item.Type == api.ItemTypeFile {
				if directoriesOnly {
					continue
				}
			} else {
				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
				continue
			}
			if activeOnly && item.ItemStatus != api.ItemStatusActive {
				continue
			}
			if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login {
				continue
			}
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break OUTER
			}
		}
		marker = result.NextMarker
		if marker == nil {
			break
		}
	}
	return
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.Type == api.ItemTypeFolder {
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, info.ID)
			d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
			// FIXME more info from dir?
			entries = append(entries, d)
		} else if info.Type == api.ItemTypeFile {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}

		// Cache some metadata for this Item to help us process events later
		// on. In particular, the box event API does not provide the old path
		// of the Item when it is renamed/deleted/moved/etc.
		f.itemMetaCacheMu.Lock()
		cachedItemMeta, found := f.itemMetaCache[info.ID]
		if !found || cachedItemMeta.SequenceID < info.SequenceID {
			f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
		}
		f.itemMetaCacheMu.Unlock()

		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, leaf, directoryID, nil
}
// preUploadCheck checks to see if a file can be uploaded
//
// It returns nil, nil if the file is good to go
// It returns the existing item if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
	check := api.PreUploadCheck{
		Name: f.opt.Enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
	}
	if size >= 0 {
		check.Size = &size
	}
	opts := rest.Opts{
		Method: "OPTIONS",
		Path:   "/files/content/",
	}
	var result api.PreUploadCheckResponse
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
			var conflict api.PreUploadCheckConflict
			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
			if err != nil {
				return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
			}
			if conflict.Conflicts.Type != api.ItemTypeFile {
				return nil, fs.ErrorIsDir
			}
			return &conflict.Conflicts, nil
		}
		return nil, fmt.Errorf("pre-upload check: %w", err)
	}
	return nil, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// If directory doesn't exist, file doesn't exist so can upload
	remote := src.Remote()
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return f.PutUnchecked(ctx, in, src, options...)
		}
		return nil, err
	}

	// Preflight check the upload, which returns the ID if the
	// object already exists
	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
	if err != nil {
		return nil, err
	}
	if item == nil {
		return f.PutUnchecked(ctx, in, src, options...)
	}

	// If object exists then create a skeleton one with just id
	o := &Object{
		fs:     f,
		remote: remote,
		id:     item.ID,
	}
	return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
// PutUnchecked the object into the container
//
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)

	o, _, _, err := f.createObject(ctx, remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(ctx, in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/" + id,
		NoResponse: true,
	}
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("can't purge root directory")
	}
	dc := f.dirCache
	rootID, err := dc.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/folders/" + rootID,
		Parameters: url.Values{},
		NoResponse: true,
	}
	opts.Parameters.Set("recursive", strconv.FormatBool(!check))
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("rmdir failed: %w", err)
	}
	f.dirCache.FlushDir(dir)
	if err != nil {
		return err
	}
	return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err := srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}

	srcPath := srcObj.fs.rootSlash() + srcObj.remote
	dstPath := f.rootSlash() + remote
	if strings.EqualFold(srcPath, dstPath) {
		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// check if dest already exists
	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
	if err != nil {
		return nil, err
	}
	if item != nil { // dest already exists, need to copy to temp name and then move
		tempSuffix := "-rclone-copy-" + random.String(8)
		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
		if err != nil {
			return nil, err
		}
		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
		err = f.deleteObject(ctx, item.ID)
		if err != nil {
			return nil, err
		}
		return f.Move(ctx, tempObj, remote)
	}

	// Copy the object
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/files/" + srcObj.id + "/copy",
		Parameters: fieldsValue(),
	}
	copyFile := api.CopyFile{
		Name: f.opt.Enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
	}
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}
// move a file or folder
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
	// Move the object
	opts := rest.Opts{
		Method:     "PUT",
		Path:       endpoint + id,
		Parameters: fieldsValue(),
	}
	move := api.UpdateFileMove{
		Name: f.opt.Enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return info, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/users/me",
	}
	var user api.User
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}
	// FIXME max upload size would be useful to use in Update
	usage = &fs.Usage{
		Used:  fs.NewUsageValue(user.SpaceUsed),                    // bytes in use
		Total: fs.NewUsageValue(user.SpaceAmount),                  // bytes total
		Free:  fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free
	}
	return usage, nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Do the move
	info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
	if err != nil {
		return err
	}

	// Do the move
	_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
	if err != nil {
		return err
	}
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	id, err := f.dirCache.FindDir(ctx, remote, false)
	var opts rest.Opts
	if err == nil {
		fs.Debugf(f, "attempting to share directory '%s'", remote)

		opts = rest.Opts{
			Method:     "PUT",
			Path:       "/folders/" + id,
			Parameters: fieldsValue(),
		}
	} else {
		fs.Debugf(f, "attempting to share single file '%s'", remote)
		o, err := f.NewObject(ctx, remote)
		if err != nil {
			return "", err
		}

		if o.(*Object).publicLink != "" {
			return o.(*Object).publicLink, nil
		}

		opts = rest.Opts{
			Method:     "PUT",
			Path:       "/files/" + o.(*Object).id,
			Parameters: fieldsValue(),
		}
	}

	shareLink := api.CreateSharedLink{}
	var info api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
		return shouldRetry(ctx, resp, err)
	})
	return info.SharedLink.URL, err
}
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
	opts := rest.Opts{
		Method:     "DELETE",
		NoResponse: true,
	}
	if itemType == api.ItemTypeFile {
		opts.Path = "/files/" + id + "/trash"
	} else {
		opts.Path = "/folders/" + id + "/trash"
	}
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	var (
		deleteErrors       atomic.Uint64
		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
		wg                 sync.WaitGroup
	)
	_, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool {
		if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
			wg.Add(1)
			concurrencyControl <- struct{}{}
			go func() {
				defer func() {
					<-concurrencyControl
					wg.Done()
				}()
				err := f.deletePermanently(ctx, item.Type, item.ID)
				if err != nil {
					fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
					deleteErrors.Add(1)
				}
			}()
		} else {
			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
		}
		return false
	})
	wg.Wait()
	if deleteErrors.Load() != 0 {
		return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
	}
	return err
}
// Shutdown shuts down the fs
func (f *Fs) Shutdown(ctx context.Context) error {
	f.tokenRenewer.Shutdown()
	return nil
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the `stream_position` early so all changes from now on get processed
		streamPosition, err := f.changeNotifyStreamPosition(ctx)
		if err != nil {
			fs.Infof(f, "Failed to get StreamPosition: %s", err)
		}

		// box can send duplicate Event IDs. Use this map to track and filter
		// the ones we've already processed.
		processedEventIDs := make(map[string]time.Time)

		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				if streamPosition == "" {
					streamPosition, err = f.changeNotifyStreamPosition(ctx)
					if err != nil {
						fs.Infof(f, "Failed to get StreamPosition: %s", err)
						continue
					}
				}

				// Garbage collect EventIDs older than 1 minute
				for eventID, timestamp := range processedEventIDs {
					if time.Since(timestamp) > time.Minute {
						delete(processedEventIDs, eventID)
					}
				}

				streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
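
// changeNotifyStreamPosition gets the current ("now") stream position from
// the Box events API, used as the starting point for change polling.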
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/events",
		Parameters: fieldsValue(),
	}
	opts.Parameters.Set("stream_position", "now")
	opts.Parameters.Set("stream_type", "changes")

	var result api.Events
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", err
	}

	return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
	fullPath = ""
	name := f.opt.Enc.ToStandardName(childName)
	if parentID != "" {
		if parentDir, ok := f.dirCache.GetInv(parentID); ok {
			if len(parentDir) > 0 {
				fullPath = parentDir + "/" + name
			} else {
				fullPath = name
			}
		}
	} else {
		// No parent, this object is at the root
		fullPath = name
	}
	return fullPath
}
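
// changeNotifyRunner fetches batches of events from the Box events API
// starting at streamPosition, filters out duplicates and events that don't
// change the file tree, and calls notifyFunc with the old and new paths of
// the affected items. It returns the next stream position to poll from.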
2023-09-15 01:21:54 +08:00
func ( f * Fs ) changeNotifyRunner ( ctx context . Context , notifyFunc func ( string , fs . EntryType ) , streamPosition string , processedEventIDs map [ string ] time . Time ) ( nextStreamPosition string , err error ) {
2023-08-25 11:40:46 +08:00
nextStreamPosition = streamPosition
for {
limit := f . opt . ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest . Opts {
Method : "GET" ,
Path : "/events" ,
Parameters : fieldsValue ( ) ,
}
opts . Parameters . Set ( "stream_position" , nextStreamPosition )
opts . Parameters . Set ( "stream_type" , "changes" )
opts . Parameters . Set ( "limit" , strconv . Itoa ( limit ) )
var result api . Events
var resp * http . Response
fs . Debugf ( f , "Checking for changes on remote (next_stream_position: %q)" , nextStreamPosition )
err = f . pacer . Call ( func ( ) ( bool , error ) {
resp , err = f . srv . CallJSON ( ctx , & opts , nil , & result )
return shouldRetry ( ctx , resp , err )
} )
if err != nil {
return "" , err
}
if result . ChunkSize != int64 ( len ( result . Entries ) ) {
return "" , fmt . Errorf ( "invalid response to event request, chunk_size (%v) not equal to number of entries (%v)" , result . ChunkSize , len ( result . Entries ) )
}
nextStreamPosition = strconv . FormatInt ( result . NextStreamPosition , 10 )
if result . ChunkSize == 0 {
return nextStreamPosition , nil
}
type pathToClear struct {
path string
entryType fs . EntryType
}
var pathsToClear [ ] pathToClear
newEventIDs := 0
for _ , entry := range result . Entries {
2023-09-15 01:22:41 +08:00
eventDetails := fmt . Sprintf ( "[%q(%d)|%s|%s|%s|%s]" , entry . Source . Name , entry . Source . SequenceID ,
entry . Source . Type , entry . EventType , entry . Source . ID , entry . EventID )
if entry . EventID == "" {
fs . Debugf ( f , "%s ignored due to missing EventID" , eventDetails )
continue
}
if _ , ok := processedEventIDs [ entry . EventID ] ; ok {
fs . Debugf ( f , "%s ignored due to duplicate EventID" , eventDetails )
2023-08-25 11:40:46 +08:00
continue
}
2023-09-15 01:21:54 +08:00
processedEventIDs [ entry . EventID ] = time . Now ( )
2023-08-25 11:40:46 +08:00
newEventIDs ++
if entry . Source . ID == "" { // missing File or Folder ID
2023-09-15 01:22:41 +08:00
fs . Debugf ( f , "%s ignored due to missing SourceID" , eventDetails )
2023-08-25 11:40:46 +08:00
continue
}
if entry . Source . Type != api . ItemTypeFile && entry . Source . Type != api . ItemTypeFolder { // event is not for a file or folder
2023-09-15 01:22:41 +08:00
fs . Debugf ( f , "%s ignored due to unsupported SourceType" , eventDetails )
2023-08-25 11:40:46 +08:00
continue
}
// Only interested in event types that result in a file tree change
if _ , found := api . FileTreeChangeEventTypes [ entry . EventType ] ; ! found {
2023-09-15 01:22:41 +08:00
fs . Debugf ( f , "%s ignored due to unsupported EventType" , eventDetails )
2023-08-25 11:40:46 +08:00
continue
}
f . itemMetaCacheMu . Lock ( )
itemMeta , cachedItemMetaFound := f . itemMetaCache [ entry . Source . ID ]
if cachedItemMetaFound {
if itemMeta . SequenceID >= entry . Source . SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f . itemMetaCacheMu . Unlock ( )
2023-09-15 01:22:41 +08:00
fs . Debugf ( f , "%s ignored due to old SequenceID (%q)" , eventDetails , itemMeta . SequenceID )
2023-08-25 11:40:46 +08:00
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete ( f . itemMetaCache , entry . Source . ID )
}
f . itemMetaCacheMu . Unlock ( )
entryType := fs . EntryDirectory
if entry . Source . Type == api . ItemTypeFile {
entryType = fs . EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f . getFullPath ( itemMeta . ParentID , itemMeta . Name )
if path != "" {
2023-09-15 01:22:41 +08:00
fs . Debugf ( f , "%s added old path (%q) for notify" , eventDetails , path )
2023-08-25 11:40:46 +08:00
pathsToClear = append ( pathsToClear , pathToClear { path : path , entryType : entryType } )
2023-09-15 01:22:41 +08:00
} else {
fs . Debugf ( f , "%s old parent not cached" , eventDetails )
2023-08-25 11:40:46 +08:00
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendants of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendants if one of their ancestors
// has been renamed/deleted.
if entry . Source . Type == api . ItemTypeFolder {
f . dirCache . FlushDir ( path )
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry . Source . ItemStatus == api . ItemStatusActive {
path := f . getFullPath ( entry . Source . Parent . ID , entry . Source . Name )
if path != "" {
fs . Debugf ( f , "%s added new path (%q) for notify" , eventDetails , path )
pathsToClear = append ( pathsToClear , pathToClear { path : path , entryType : entryType } )
} else {
fs . Debugf ( f , "%s new parent not found" , eventDetails )
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition , nil
}
notifiedPaths := make ( map [ string ] bool )
for _ , p := range pathsToClear {
if _ , ok := notifiedPaths [ p . path ] ; ok {
continue
}
notifiedPaths [ p . path ] = true
notifyFunc ( p . path , p . entryType )
}
fs . Debugf ( f , "Received %v events, resulting in %v paths and %v notifications" , len ( result . Entries ) , len ( pathsToClear ) , len ( notifiedPaths ) )
}
}
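// The loop above deduplicates events by EventID and SequenceID and then
// notifies each affected path at most once per batch of events. A minimal
// sketch of how a caller might consume these notifications through the
// generic fs.Features().ChangeNotify hook (the callback and poll interval
// below are hypothetical, not part of this backend):
//
//	func watchChanges(ctx context.Context, f fs.Fs) {
//		do := f.Features().ChangeNotify
//		if do == nil {
//			return // backend does not support ChangeNotify
//		}
//		pollInterval := make(chan time.Duration)
//		do(ctx, func(path string, entryType fs.EntryType) {
//			fs.Infof(nil, "change detected at %q (%v)", path, entryType)
//		}, pollInterval)
//		pollInterval <- 30 * time.Second // start polling every 30s
//	}
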
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func ( f * Fs ) DirCacheFlush ( ) {
f . dirCache . ResetRoot ( )
}
// Hashes returns the supported hash sets.
func ( f * Fs ) Hashes ( ) hash . Set {
return hash . Set ( hash . SHA1 )
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func ( o * Object ) Fs ( ) fs . Info {
return o . fs
}
// Return a string version
func ( o * Object ) String ( ) string {
if o == nil {
return "<nil>"
}
return o . remote
}
// Remote returns the remote path
func ( o * Object ) Remote ( ) string {
return o . remote
}
// Hash returns the SHA-1 of an object, returning a lowercase hex string
func ( o * Object ) Hash ( ctx context . Context , t hash . Type ) ( string , error ) {
if t != hash . SHA1 {
return "" , hash . ErrUnsupported
}
return o . sha1 , nil
}
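// A minimal sketch (a hypothetical helper, not part of this backend) of using
// Hash together with Open to verify a download against the SHA-1 that Box
// reports; it assumes the caller imports "crypto/sha1" and "encoding/hex":
//
//	func verifySHA1(ctx context.Context, o fs.Object) (bool, error) {
//		want, err := o.Hash(ctx, hash.SHA1)
//		if err != nil || want == "" {
//			return false, err
//		}
//		rc, err := o.Open(ctx)
//		if err != nil {
//			return false, err
//		}
//		defer func() { _ = rc.Close() }()
//		h := sha1.New()
//		if _, err := io.Copy(h, rc); err != nil {
//			return false, err
//		}
//		return hex.EncodeToString(h.Sum(nil)) == want, nil
//	}
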
// Size returns the size of an object in bytes
func ( o * Object ) Size ( ) int64 {
err := o . readMetaData ( context . TODO ( ) )
if err != nil {
fs . Logf ( o , "Failed to read metadata: %v" , err )
return 0
}
return o . size
}
// setMetaData sets the metadata from info
func ( o * Object ) setMetaData ( info * api . Item ) ( err error ) {
if info . Type == api . ItemTypeFolder {
return fs . ErrorIsDir
}
if info . Type != api . ItemTypeFile {
return fmt . Errorf ( "%q is %q: %w" , o . remote , info . Type , fs . ErrorNotAFile )
}
o . hasMetaData = true
o . size = int64 ( info . Size )
o . sha1 = info . SHA1
o . modTime = info . ModTime ( )
o . id = info . ID
o . publicLink = info . SharedLink . URL
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func ( o * Object ) readMetaData ( ctx context . Context ) ( err error ) {
if o . hasMetaData {
return nil
}
info , err := o . fs . readMetaDataForPath ( ctx , o . remote )
if err != nil {
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . Code == "not_found" || apiErr . Code == "trashed" {
return fs . ErrorObjectNotFound
}
}
return err
}
return o . setMetaData ( info )
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the HTTP headers
func ( o * Object ) ModTime ( ctx context . Context ) time . Time {
err := o . readMetaData ( ctx )
if err != nil {
fs . Logf ( o , "Failed to read metadata: %v" , err )
return time . Now ( )
}
return o . modTime
}
// setModTime sets the modification time of the remote object
func ( o * Object ) setModTime ( ctx context . Context , modTime time . Time ) ( * api . Item , error ) {
opts := rest . Opts {
Method : "PUT" ,
Path : "/files/" + o . id ,
Parameters : fieldsValue ( ) ,
}
update := api . UpdateFileModTime {
ContentModifiedAt : api . Time ( modTime ) ,
}
var info * api . Item
err := o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err := o . fs . srv . CallJSON ( ctx , & opts , & update , & info )
return shouldRetry ( ctx , resp , err )
} )
return info , err
}
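// For reference, the body sent above is just the marshalled
// api.UpdateFileModTime struct, PUT against the file's metadata endpoint. A
// minimal sketch (assuming that struct's field carries the
// "content_modified_at" json tag used by the Box file update API):
//
//	update := api.UpdateFileModTime{ContentModifiedAt: api.Time(modTime)}
//	body, _ := json.Marshal(update)
//	// body is roughly: {"content_modified_at":"2006-01-02T15:04:05-07:00"}
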
// SetModTime sets the modification time of the remote object
func ( o * Object ) SetModTime ( ctx context . Context , modTime time . Time ) error {
info , err := o . setModTime ( ctx , modTime )
if err != nil {
return err
}
return o . setMetaData ( info )
}
// Storable returns a boolean showing whether this object is storable
func ( o * Object ) Storable ( ) bool {
return true
}
// Open an object for read
func ( o * Object ) Open ( ctx context . Context , options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
if o . id == "" {
return nil , errors . New ( "can't download - no id" )
}
fs . FixRangeOption ( options , o . size )
var resp * http . Response
opts := rest . Opts {
Method : "GET" ,
Path : "/files/" + o . id + "/content" ,
Options : options ,
}
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err = o . fs . srv . Call ( ctx , & opts )
return shouldRetry ( ctx , resp , err )
} )
if err != nil {
return nil , err
}
return resp . Body , err
}
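// A minimal sketch (hypothetical caller) of a ranged read, which the
// fs.FixRangeOption call above normalises before the request is sent:
//
//	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1024*1024 - 1})
//	if err != nil {
//		return err
//	}
//	defer func() { _ = rc.Close() }()
//	firstMiB, err := io.ReadAll(rc) // reads only the first 1 MiB of the object
//	if err != nil {
//		return err
//	}
//	_ = firstMiB
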
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MiB of content
func ( o * Object ) upload ( ctx context . Context , in io . Reader , leaf , directoryID string , modTime time . Time , options ... fs . OpenOption ) ( err error ) {
upload := api . UploadFile {
Name : o . fs . opt . Enc . FromStandardName ( leaf ) ,
ContentModifiedAt : api . Time ( modTime ) ,
ContentCreatedAt : api . Time ( modTime ) ,
Parent : api . Parent {
ID : directoryID ,
} ,
}
var resp * http . Response
var result api . FolderItems
opts := rest . Opts {
Method : "POST" ,
Body : in ,
MultipartMetadataName : "attributes" ,
MultipartContentName : "contents" ,
MultipartFileName : upload . Name ,
RootURL : uploadURL ,
Options : options ,
}
// If the object has an ID then it already exists, so create a new version
if o . id != "" {
opts . Path = "/files/" + o . id + "/content"
} else {
opts . Path = "/files/content"
}
err = o . fs . pacer . CallNoRetry ( func ( ) ( bool , error ) {
resp , err = o . fs . srv . CallJSON ( ctx , & opts , & upload , & result )
return shouldRetry ( ctx , resp , err )
} )
if err != nil {
return err
}
if result . TotalCount != 1 || len ( result . Entries ) != 1 {
return fmt . Errorf ( "failed to upload %v - not sure why" , o )
}
return o . setMetaData ( & result . Entries [ 0 ] )
}
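// The request built above is a multipart/form-data POST to the upload host,
// with the JSON attributes in a part named "attributes" and the raw file
// bytes in a part named "contents". A rough standalone sketch of the same
// shape using the standard library (illustrative only, not how rest.Opts
// assembles the request internally):
//
//	var buf bytes.Buffer
//	w := multipart.NewWriter(&buf)
//	attrs, _ := json.Marshal(upload)              // api.UploadFile from above
//	_ = w.WriteField("attributes", string(attrs))
//	fw, _ := w.CreateFormFile("contents", upload.Name)
//	_, _ = io.Copy(fw, in)                        // the file contents
//	_ = w.Close()
//	// POST buf to https://upload.box.com/api/2.0/files/content
//	// with Content-Type set to w.FormDataContentType()
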
// Update the object with the contents of the io.Reader, modTime and size
//
// If the object already has an ID then a new version of the existing file is
// uploaded rather than creating a new one.
//
// The new object may have been created if an error is returned.
func ( o * Object ) Update ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( err error ) {
if o . fs . tokenRenewer != nil {
o . fs . tokenRenewer . Start ( )
defer o . fs . tokenRenewer . Stop ( )
}
size := src . Size ( )
modTime := src . ModTime ( ctx )
remote := o . Remote ( )
// Create the directory for the object if it doesn't exist
leaf , directoryID , err := o . fs . dirCache . FindPath ( ctx , remote , true )
if err != nil {
return err
}
// Upload with simple or multipart
if size <= int64 ( o . fs . opt . UploadCutoff ) {
err = o . upload ( ctx , in , leaf , directoryID , modTime , options ... )
} else {
err = o . uploadMultipart ( ctx , in , leaf , directoryID , size , modTime , options ... )
}
return err
}
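// As a worked example of the cutoff above (a sketch; the sizes are
// illustrative and assume a 50 MiB upload_cutoff):
//
//	cutoff := int64(50 * 1024 * 1024)
//	small := int64(10 * 1024 * 1024)  // 10 MiB: sent via the single-request upload
//	large := int64(200 * 1024 * 1024) // 200 MiB: sent via uploadMultipart
//	fmt.Println(small <= cutoff, large <= cutoff) // true false
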
// Remove an object
func ( o * Object ) Remove ( ctx context . Context ) error {
return o . fs . deleteObject ( ctx , o . id )
}
// ID returns the ID of the Object if known, or "" if not
func ( o * Object ) ID ( ) string {
return o . id
}
// Check the interfaces are satisfied
var (
_ fs . Fs = ( * Fs ) ( nil )
_ fs . Purger = ( * Fs ) ( nil )
_ fs . PutStreamer = ( * Fs ) ( nil )
_ fs . Copier = ( * Fs ) ( nil )
_ fs . Abouter = ( * Fs ) ( nil )
_ fs . Mover = ( * Fs ) ( nil )
_ fs . DirMover = ( * Fs ) ( nil )
_ fs . DirCacheFlusher = ( * Fs ) ( nil )
_ fs . PublicLinker = ( * Fs ) ( nil )
_ fs . CleanUpper = ( * Fs ) ( nil )
_ fs . Shutdowner = ( * Fs ) ( nil )
_ fs . Object = ( * Object ) ( nil )
_ fs . IDer = ( * Object ) ( nil )
)