2017-03-08 19:21:57 +08:00
// Package onedrive provides an interface to the Microsoft OneDrive
2015-10-05 05:08:31 +08:00
// object storage system.
package onedrive
import (
2019-06-17 16:34:30 +08:00
"context"
2018-04-20 19:55:49 +08:00
"encoding/base64"
"encoding/hex"
2017-03-12 20:00:10 +08:00
"encoding/json"
2015-10-05 05:08:31 +08:00
"fmt"
"io"
"log"
"net/http"
2016-11-26 05:52:43 +08:00
"path"
2020-01-29 20:16:18 +08:00
"strconv"
2015-10-05 05:08:31 +08:00
"strings"
2020-08-04 03:45:37 +08:00
"sync"
2015-10-05 05:08:31 +08:00
"time"
2016-05-31 02:49:21 +08:00
"github.com/pkg/errors"
2019-07-29 01:47:38 +08:00
"github.com/rclone/rclone/backend/onedrive/api"
2019-09-23 21:32:36 +08:00
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
2019-07-29 01:47:38 +08:00
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
2020-08-04 03:45:37 +08:00
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
2018-11-02 20:14:19 +08:00
"github.com/rclone/rclone/lib/atexit"
2019-07-29 01:47:38 +08:00
"github.com/rclone/rclone/lib/dircache"
2020-01-15 01:33:35 +08:00
"github.com/rclone/rclone/lib/encoder"
2019-07-29 01:47:38 +08:00
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
2015-10-05 05:08:31 +08:00
"golang.org/x/oauth2"
)
const (
	rcloneClientID              = "b15665d9-eda6-4092-8539-0eec376afd59"
	rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
	minSleep                    = 10 * time.Millisecond // minimum pacer sleep between API calls
	maxSleep                    = 2 * time.Second       // maximum pacer sleep when backing off
	decayConstant               = 2                     // bigger for slower decay, exponential
	graphURL                    = "https://graph.microsoft.com/v1.0"

	// Keys used to store the drive parameters in the config file
	configDriveID   = "drive_id"
	configDriveType = "drive_type"

	// Possible values of the drive_type config item
	driveTypePersonal   = "personal"
	driveTypeBusiness   = "business"
	driveTypeSharepoint = "documentLibrary"

	// Upload chunk size - must be a multiple of chunkSizeMultiple
	defaultChunkSize  = 10 * fs.MebiByte
	chunkSizeMultiple = 320 * fs.KibiByte
)
// Globals
var (
	// Description of how to auth for this app for a business account
	oauthConfig = &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
		},
		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}

	// QuickXorHashType is the hash.Type for OneDrive
	QuickXorHashType hash.Type
)
// Register with Fs
func init() {
	// Register the OneDrive QuickXorHash so it can be used via hash.Type
	QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "onedrive",
		Description: "Microsoft OneDrive",
		NewFs:       NewFs,
		// Config runs the interactive configuration: it obtains an oauth
		// token and then helps the user pick a drive ID and drive type,
		// which are stored in the config file.
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
			ci := fs.GetConfig(ctx)
			err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
				return
			}

			// Stop if we are running non-interactive config
			if ci.AutoConfirm {
				return
			}

			// Minimal subsets of the Graph drive/site resources needed here
			type driveResource struct {
				DriveID   string `json:"id"`
				DriveName string `json:"name"`
				DriveType string `json:"driveType"`
			}
			type drivesResponse struct {
				Drives []driveResource `json:"value"`
			}

			type siteResource struct {
				SiteID   string `json:"id"`
				SiteName string `json:"displayName"`
				SiteURL  string `json:"webUrl"`
			}
			type siteResponse struct {
				Sites []siteResource `json:"value"`
			}

			oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure OneDrive: %v", err)
			}
			srv := rest.NewClient(oAuthClient)

			var opts rest.Opts
			var finalDriveID string
			var siteID string
			// Ask the user how they want to find their drive
			switch config.Choose("Your choice",
				[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
				[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
				false) {

			case "onedrive":
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/me/drives",
				}
			case "sharepoint":
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/sites/root/drives",
				}
			case "driveid":
				fmt.Printf("Paste your Drive ID here> ")
				finalDriveID = config.ReadLine()
			case "siteid":
				fmt.Printf("Paste your Site ID here> ")
				siteID = config.ReadLine()
			case "search":
				fmt.Printf("What to search for> ")
				searchTerm := config.ReadLine()
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/sites?search=" + searchTerm,
				}

				sites := siteResponse{}
				_, err := srv.CallJSON(ctx, &opts, nil, &sites)
				if err != nil {
					log.Fatalf("Failed to query available sites: %v", err)
				}

				if len(sites.Sites) == 0 {
					log.Fatalf("Search for '%s' returned no results", searchTerm)
				} else {
					fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
					for index, site := range sites.Sites {
						fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
					}
					siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
				}
			}

			// if we have a siteID we need to ask for the drives
			if siteID != "" {
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/sites/" + siteID + "/drives",
				}
			}

			// We don't have the final ID yet?
			// query Microsoft Graph
			if finalDriveID == "" {
				drives := drivesResponse{}
				_, err := srv.CallJSON(ctx, &opts, nil, &drives)
				if err != nil {
					log.Fatalf("Failed to query available drives: %v", err)
				}

				// Also call /me/drive as sometimes /me/drives doesn't return it #4068
				if opts.Path == "/me/drives" {
					opts.Path = "/me/drive"
					meDrive := driveResource{}
					_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
					if err != nil {
						log.Fatalf("Failed to query available drives: %v", err)
					}
					found := false
					for _, drive := range drives.Drives {
						if drive.DriveID == meDrive.DriveID {
							found = true
							break
						}
					}
					// add the me drive if not found already
					if !found {
						fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
						drives.Drives = append(drives.Drives, meDrive)
					}
				}

				if len(drives.Drives) == 0 {
					log.Fatalf("No drives found")
				} else {
					fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
					for index, drive := range drives.Drives {
						fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
					}
					finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
				}
			}

			// Test the driveID and get drive type
			opts = rest.Opts{
				Method:  "GET",
				RootURL: graphURL,
				Path:    "/drives/" + finalDriveID + "/root"}
			var rootItem api.Item
			_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
			if err != nil {
				log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
			}

			fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
			// This does not work, YET :)
			if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
				log.Fatalf("Cancelled by user")
			}

			// Persist the chosen drive in the config file
			m.Set(configDriveID, finalDriveID)
			m.Set(configDriveType, rootItem.ParentReference.DriveType)
			config.SaveConfig()
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "chunk_size",
			Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name:     "drive_id",
			Help:     "The ID of the drive to use",
			Default:  "",
			Advanced: true,
		}, {
			Name:     "drive_type",
			Help:     "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
			Default:  "",
			Advanced: true,
		}, {
			Name: "expose_onenote_files",
			Help: `Set to make OneNote files show up in directory listings.

By default rclone will hide OneNote files in directory listings because
operations like "Open" and "Update" won't work on them. But this
behaviour may also prevent you from deleting them. If you want to
delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:    "server_side_across_configs",
			Default: false,
			Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.

This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
fall back to normal copy (which will be slightly slower).`,
			Advanced: true,
		}, {
			Name:    "no_versions",
			Default: false,
			Help: `Remove all versions on modifying operations

Onedrive for business creates versions when rclone uploads new files
overwriting an existing one and when it sets the modification time.

These versions take up space out of the quota.

This flag checks for versions after file upload and setting
modification time and removes all but the last version.

**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// List of replaced characters:
			//   < (less than)     -> '＜' // FULLWIDTH LESS-THAN SIGN
			//   > (greater than)  -> '＞' // FULLWIDTH GREATER-THAN SIGN
			//   : (colon)         -> '：' // FULLWIDTH COLON
			//   " (double quote)  -> '＂' // FULLWIDTH QUOTATION MARK
			//   \ (backslash)     -> '＼' // FULLWIDTH REVERSE SOLIDUS
			//   | (vertical line) -> '｜' // FULLWIDTH VERTICAL LINE
			//   ? (question mark) -> '？' // FULLWIDTH QUESTION MARK
			//   * (asterisk)      -> '＊' // FULLWIDTH ASTERISK
			//   # (number sign)   -> '＃' // FULLWIDTH NUMBER SIGN
			//   % (percent sign)  -> '％' // FULLWIDTH PERCENT SIGN
			//
			// Folder names cannot begin with a tilde ('~')
			// List of replaced characters:
			//   ~ (tilde)         -> '～' // FULLWIDTH TILDE
			//
			// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
			// List of replaced characters:
			//   . (period)        -> '．' // FULLWIDTH FULL STOP
			//     (space)         -> '␠' // SYMBOL FOR SPACE
			//
			// Also encode invalid UTF-8 bytes as json doesn't handle them.
			//
			// The OneDrive API documentation lists the set of reserved characters, but
			// testing showed this list is incomplete. This are the differences:
			//  - " (double quote) is rejected, but missing in the documentation
			//  - space at the end of file and folder names is rejected, but missing in the documentation
			//  - period at the end of file names is rejected, but missing in the documentation
			//
			// Adding these restrictions to the OneDrive API documentation yields exactly
			// the same rules as the Windows naming conventions.
			//
			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeHashPercent |
				encoder.EncodeLeftSpace |
				encoder.EncodeLeftTilde |
				encoder.EncodeRightPeriod |
				encoder.EncodeRightSpace |
				encoder.EncodeWin |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
2017-08-04 03:57:42 +08:00
2018-05-15 01:06:57 +08:00
// Options defines the configuration for this backend
type Options struct {
	ChunkSize               fs.SizeSuffix        `config:"chunk_size"`                 // upload chunk size - multiple of 320k
	DriveID                 string               `config:"drive_id"`                   // ID of the drive to use
	DriveType               string               `config:"drive_type"`                 // personal | business | documentLibrary
	ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`       // show OneNote files in listings
	ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"` // allow server-side copy between configs
	NoVersions              bool                 `config:"no_versions"`                // remove versions after modifying operations
	Enc                     encoder.MultiEncoder `config:"encoding"`                   // filename encoding rules
}
// Fs represents a remote one drive
type Fs struct {
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	ci           *fs.ConfigInfo     // global config
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the one drive server
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
	driveID      string             // ID to use for querying Microsoft Graph
	driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}
// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs            *Fs       // what this object is part of
	remote        string    // The remote path
	hasMetaData   bool      // whether info below has been set
	isOneNoteFile bool      // Whether the object is a OneNote file
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	id            string    // ID of the object
	sha1          string    // SHA-1 of the object content
	quickxorhash  string    // QuickXorHash of the object content
	mimeType      string    // Content-Type of object from server (may not be as uploaded)
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
	return "One drive root '" + f.root + "'"
}
2017-01-14 01:21:47 +08:00
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
2020-05-20 18:39:20 +08:00
// parsePath parses a one drive 'url' by stripping any leading and
// trailing "/" characters.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
2020-11-03 00:49:27 +08:00
// gatewayTimeoutError ensures the 504 advice message is only logged once
var gatewayTimeoutError sync.Once

// errAsyncJobAccessDenied is a sentinel error for async jobs failing with access denied
var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
2020-11-03 00:49:27 +08:00
2015-10-05 05:08:31 +08:00
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry ( resp * http . Response , err error ) ( bool , error ) {
2020-01-29 20:16:18 +08:00
retry := false
if resp != nil {
switch resp . StatusCode {
case 401 :
if len ( resp . Header [ "Www-Authenticate" ] ) == 1 && strings . Index ( resp . Header [ "Www-Authenticate" ] [ 0 ] , "expired_token" ) >= 0 {
retry = true
fs . Debugf ( nil , "Should retry: %v" , err )
}
case 429 : // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp . Header [ "Retry-After" ] ; len ( values ) == 1 && values [ 0 ] != "" {
retryAfter , parseErr := strconv . Atoi ( values [ 0 ] )
if parseErr != nil {
fs . Debugf ( nil , "Failed to parse Retry-After: %q: %v" , values [ 0 ] , parseErr )
} else {
duration := time . Second * time . Duration ( retryAfter )
retry = true
err = pacer . RetryAfterError ( err , duration )
fs . Debugf ( nil , "Too many requests. Trying again in %d seconds." , retryAfter )
}
}
2020-11-03 00:49:27 +08:00
case 504 : // Gateway timeout
gatewayTimeoutError . Do ( func ( ) {
fs . Errorf ( nil , "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers" , err )
} )
2020-03-25 20:56:38 +08:00
case 507 : // Insufficient Storage
return false , fserrors . FatalError ( err )
2020-01-29 20:16:18 +08:00
}
2017-03-23 21:10:43 +08:00
}
2020-01-29 20:16:18 +08:00
return retry || fserrors . ShouldRetry ( err ) || fserrors . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
2015-10-05 05:08:31 +08:00
}
2019-01-09 13:11:00 +08:00
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
	if relPath != "" {
		// Encode and escape the relative path and add the trailing colon the addressing scheme needs
		relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
	}
	opts := newOptsCall(normalizedID, "GET", ":"+relPath)
	// Call the API retrying as required via the pacer
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
	})

	return info, resp, err
}
2019-01-09 13:11:00 +08:00
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
	firstSlashIndex := strings.IndexRune(path, '/')

	// For non-personal drives, or a path with no folder component, the
	// simple /root: addressing works and we can query directly.
	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
		var opts rest.Opts
		if len(path) == 0 {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root",
			}
		} else {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
			}
		}
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
			return shouldRetry(resp, err)
		})
		return info, resp, err
	}

	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
	// For OneDrive Personal, we need to consider the "shared with me" folders.
	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
	// by its path relative to the folder's ID relative to the sharer's driveID.
	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
	// So we read metadata relative to a suitable folder's normalized ID.
	var dirCacheFoundRoot bool
	var rootNormalizedID string
	if f.dirCache != nil {
		rootNormalizedID, err = f.dirCache.RootID(ctx, false)
		dirCacheRootIDExists := err == nil
		if f.root == "" {
			// if f.root == "", it means f.root is the absolute root of the drive
			// and its ID should have been found in NewFs
			dirCacheFoundRoot = dirCacheRootIDExists
		} else if _, err := f.dirCache.RootParentID(ctx, false); err == nil {
			// if root is in a folder, it must have a parent folder, and
			// if dirCache has found root in NewFs, the parent folder's ID
			// should be present.
			// This RootParentID() check is a fix for #3164 which describes
			// a possible case where the root is not found.
			dirCacheFoundRoot = dirCacheRootIDExists
		}
	}

	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
	var firstDir, baseNormalizedID string
	if !insideRoot || !dirCacheFoundRoot {
		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
		if err != nil {
			return info, resp, err
		}
		baseNormalizedID = info.GetID()
	} else {
		if f.root != "" {
			// Read metadata based on root
			baseNormalizedID = rootNormalizedID
		} else {
			// Read metadata based on firstDir
			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
			baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
}
2015-11-27 20:46:13 +08:00
// errorHandler parses a non 2xx error response into an error
func errorHandler ( resp * http . Response ) error {
// Decode error response
errResponse := new ( api . Error )
err := rest . DecodeJSON ( resp , & errResponse )
if err != nil {
2017-02-09 19:01:20 +08:00
fs . Debugf ( nil , "Couldn't decode error response: %v" , err )
2015-11-27 20:46:13 +08:00
}
if errResponse . ErrorInfo . Code == "" {
errResponse . ErrorInfo . Code = resp . Status
}
return errResponse
}
2018-09-07 19:02:27 +08:00
// checkUploadChunkSize validates cs, requiring it to be a positive
// multiple of chunkSizeMultiple (320 kiB).
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	switch {
	case cs%chunkSizeMultiple != 0:
		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
	case cs < minChunkSize:
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}
// setUploadChunkSize sets a new chunk size after validation, returning
// the previous value so it can be restored.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return
}
2015-10-05 05:08:31 +08:00
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Validate the configured chunk size (must be a multiple of 320k)
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "onedrive: chunk size")
	}

	// drive_id and drive_type are set by the interactive config - both are required
	if opt.DriveID == "" || opt.DriveType == "" {
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}

	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure OneDrive")
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		ci:        ci,
		driveID:   opt.DriveID,
		driveType: opt.DriveType,
		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, _, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath(ctx, "")
	if err != nil || rootInfo.GetID() == "" {
		return nil, errors.Wrap(err, "failed to get root")
	}

	f.dirCache = dircache.New(root, rootInfo.GetID(), f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// rootSlash returns f.root with a trailing slash added, or the empty
// string when the root itself is empty.
func (f *Fs) rootSlash() string {
	if f.root != "" {
		return f.root + "/"
	}
	return f.root
}
// Return an Object from a path
//
2016-06-26 04:23:20 +08:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
2019-06-17 16:34:30 +08:00
func ( f * Fs ) newObjectWithInfo ( ctx context . Context , remote string , info * api . Item ) ( fs . Object , error ) {
2015-10-05 05:08:31 +08:00
o := & Object {
fs : f ,
remote : remote ,
}
2017-03-07 04:11:54 +08:00
var err error
2015-10-05 05:08:31 +08:00
if info != nil {
// Set info
2017-03-07 04:11:54 +08:00
err = o . setMetaData ( info )
2015-10-05 05:08:31 +08:00
} else {
2019-06-17 16:34:30 +08:00
err = o . readMetaData ( ctx ) // reads info and meta, returning an error
2017-03-07 04:11:54 +08:00
}
if err != nil {
return nil , err
2015-10-05 05:08:31 +08:00
}
2016-06-26 04:23:20 +08:00
return o , nil
2015-10-05 05:08:31 +08:00
}
2016-06-26 04:23:20 +08:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
2019-06-17 16:34:30 +08:00
func ( f * Fs ) FindLeaf ( ctx context . Context , pathID , leaf string ) ( pathIDOut string , found bool , err error ) {
2017-02-09 19:01:20 +08:00
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
2019-01-09 13:11:00 +08:00
_ , ok := f . dirCache . GetInv ( pathID )
2015-10-05 05:08:31 +08:00
if ! ok {
2016-06-12 22:06:02 +08:00
return "" , false , errors . New ( "couldn't find parent ID" )
2015-10-05 05:08:31 +08:00
}
2019-09-05 03:00:37 +08:00
info , resp , err := f . readMetaDataForPathRelativeToID ( ctx , pathID , leaf )
2015-10-05 05:08:31 +08:00
if err != nil {
if resp != nil && resp . StatusCode == http . StatusNotFound {
return "" , false , nil
}
return "" , false , err
}
2018-10-03 12:46:25 +08:00
if info . GetPackageType ( ) == api . PackageTypeOneNote {
return "" , false , errors . New ( "found OneNote file when looking for folder" )
}
2018-07-12 01:48:59 +08:00
if info . GetFolder ( ) == nil {
2016-06-12 22:06:02 +08:00
return "" , false , errors . New ( "found file when looking for folder" )
2015-10-05 05:08:31 +08:00
}
2018-07-12 01:48:59 +08:00
return info . GetID ( ) , true , nil
2015-10-05 05:08:31 +08:00
}
// CreateDir makes a directory with pathID as parent and name leaf
2019-06-17 16:34:30 +08:00
func ( f * Fs ) CreateDir ( ctx context . Context , dirID , leaf string ) ( newID string , err error ) {
2018-07-12 01:48:59 +08:00
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
2015-10-05 05:08:31 +08:00
var resp * http . Response
var info * api . Item
2018-07-12 01:48:59 +08:00
opts := newOptsCall ( dirID , "POST" , "/children" )
2015-10-05 05:08:31 +08:00
mkdir := api . CreateItemRequest {
2020-01-15 01:33:35 +08:00
Name : f . opt . Enc . FromStandardName ( leaf ) ,
2015-10-05 05:08:31 +08:00
ConflictBehavior : "fail" ,
}
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = f . srv . CallJSON ( ctx , & opts , & mkdir , & info )
2015-10-05 05:08:31 +08:00
return shouldRetry ( resp , err )
} )
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "" , err
}
2018-07-12 01:48:59 +08:00
2015-10-05 05:08:31 +08:00
//fmt.Printf("...Id %q\n", *info.Id)
2018-07-12 01:48:59 +08:00
return info . GetID ( ) , nil
2015-10-05 05:08:31 +08:00
}
// listAllFn is the user function called by listAll to process each
// File item found in a directory listing.
//
// Should return true to finish processing early.
type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// If directoriesOnly is set it only sends directories to fn; if
// filesOnly is set it only sends files.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
	for {
		var result api.ListChildrenResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return found, errors.Wrap(err, "couldn't list files")
		}
		if len(result.Value) == 0 {
			break
		}
		for i := range result.Value {
			// Take a pointer so the Name rewrite below mutates the
			// item passed to fn
			item := &result.Value[i]
			isFolder := item.GetFolder() != nil
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			// Skip tombstones of deleted items
			if item.Deleted != nil {
				continue
			}
			// Decode the name from the API encoding before handing it on
			item.Name = f.opt.Enc.ToStandardName(item.GetName())
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if result.NextLink == "" {
			break
		}
		// Follow paging: NextLink is an absolute URL so put it in
		// RootURL and clear Path for the next request
		opts.Path = ""
		opts.RootURL = result.NextLink
	}
	return
}
2017-06-12 05:43:31 +08:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	// iErr carries an error out of the listAll callback (which can
	// only signal "stop" by returning true)
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		// OneNote "files" are packages which can't be downloaded, so
		// hide them unless the user asked for them
		if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
			fs.Debugf(info.Name, "OneNote file not shown in directory listing")
			return false
		}
		remote := path.Join(dir, info.GetName())
		folder := info.GetFolder()
		if folder != nil {
			// cache the directory ID for later lookups
			id := info.GetID()
			f.dirCache.Put(remote, id)
			d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
			d.SetItems(folder.ChildCount)
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
2015-10-30 16:40:14 +08:00
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
2015-10-05 05:08:31 +08:00
//
2015-10-30 16:40:14 +08:00
// Returns the object, leaf, directoryID and error
2015-10-05 05:08:31 +08:00
//
2015-10-30 16:40:14 +08:00
// Used to create new objects
2019-06-17 16:34:30 +08:00
func ( f * Fs ) createObject ( ctx context . Context , remote string , modTime time . Time , size int64 ) ( o * Object , leaf string , directoryID string , err error ) {
2015-10-05 05:08:31 +08:00
// Create the directory for the object if it doesn't exist
2020-05-12 00:24:37 +08:00
leaf , directoryID , err = f . dirCache . FindPath ( ctx , remote , true )
2015-10-05 05:08:31 +08:00
if err != nil {
2015-10-30 16:40:14 +08:00
return nil , leaf , directoryID , err
2015-10-05 05:08:31 +08:00
}
// Temporary Object under construction
2015-10-30 16:40:14 +08:00
o = & Object {
2015-10-05 05:08:31 +08:00
fs : f ,
remote : remote ,
}
2015-10-30 16:40:14 +08:00
return o , leaf , directoryID , nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
2019-06-17 16:34:30 +08:00
func ( f * Fs ) Put ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
2016-02-18 19:35:25 +08:00
remote := src . Remote ( )
size := src . Size ( )
2019-06-17 16:34:30 +08:00
modTime := src . ModTime ( ctx )
2016-02-18 19:35:25 +08:00
2019-06-17 16:34:30 +08:00
o , _ , _ , err := f . createObject ( ctx , remote , modTime , size )
2015-10-30 16:40:14 +08:00
if err != nil {
return nil , err
}
2019-06-17 16:34:30 +08:00
return o , o . Update ( ctx , in , src , options ... )
2015-10-05 05:08:31 +08:00
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// FindDir with create=true makes the directory (and any missing
	// parents) as a side effect; the returned ID isn't needed here.
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
	opts := newOptsCall(id, "DELETE", "")
	// DELETE returns no body
	opts.NoResponse = true
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
2019-06-17 16:34:30 +08:00
func ( f * Fs ) purgeCheck ( ctx context . Context , dir string , check bool ) error {
2016-11-26 05:52:43 +08:00
root := path . Join ( f . root , dir )
if root == "" {
2016-06-12 22:06:02 +08:00
return errors . New ( "can't purge root directory" )
2015-10-05 05:08:31 +08:00
}
dc := f . dirCache
2019-06-17 16:34:30 +08:00
rootID , err := dc . FindDir ( ctx , dir , false )
2015-10-05 05:08:31 +08:00
if err != nil {
return err
}
2018-09-01 06:07:12 +08:00
if check {
// check to see if there are any items
2019-09-05 03:00:37 +08:00
found , err := f . listAll ( ctx , rootID , false , false , func ( item * api . Item ) bool {
2018-09-01 06:07:12 +08:00
return true
} )
if err != nil {
return err
}
if found {
return fs . ErrorDirectoryNotEmpty
}
2015-10-05 05:08:31 +08:00
}
2019-09-05 03:00:37 +08:00
err = f . deleteObject ( ctx , rootID )
2015-10-05 05:08:31 +08:00
if err != nil {
return err
}
2016-11-26 05:52:43 +08:00
f . dirCache . FlushDir ( dir )
2015-10-05 05:08:31 +08:00
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (o *Fs) Rmdir(ctx context.Context, dir string) error {
	// check=true makes purgeCheck fail with fs.ErrorDirectoryNotEmpty
	// if the directory has any entries
	return o.purgeCheck(ctx, dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	// Modification times are stored to 1 second granularity
	return time.Second
}
2015-10-30 16:40:14 +08:00
// waitForJob waits for the async job with status in location to
// complete, polling once a second until f.ci.Timeout has elapsed.
//
// On successful completion it re-reads o's metadata so the caller
// sees the finished item.
func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
	deadline := time.Now().Add(f.ci.Timeout)
	for time.Now().Before(deadline) {
		var resp *http.Response
		var err error
		var body []byte
		err = f.pacer.Call(func() (bool, error) {
			// NOTE(review): this deliberately uses the default
			// http client rather than f.srv - presumably because the
			// monitor URL must be fetched without auth headers; it
			// also means no per-request timeout or ctx cancellation
			// applies here - confirm before changing.
			resp, err = http.Get(location)
			if err != nil {
				return fserrors.ShouldRetry(err), err
			}
			body, err = rest.ReadBody(resp)
			return fserrors.ShouldRetry(err), err
		})
		if err != nil {
			return err
		}
		// Try to decode the body first as an api.AsyncOperationStatus
		var status api.AsyncOperationStatus
		err = json.Unmarshal(body, &status)
		if err != nil {
			return errors.Wrapf(err, "async status result not JSON: %q", body)
		}
		switch status.Status {
		case "failed":
			// Access denied is mapped to a sentinel so callers (Copy)
			// can fall back to a non-server-side operation
			if strings.HasPrefix(status.ErrorCode, "AccessDenied_") {
				return errAsyncJobAccessDenied
			}
			fallthrough
		case "deleteFailed":
			return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
		case "completed":
			err = o.readMetaData(ctx)
			return errors.Wrapf(err, "async operation completed but readMetaData failed")
		}
		// Still in progress - poll again shortly
		time.Sleep(1 * time.Second)
	}
	return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
}
2020-10-14 05:43:40 +08:00
// Copy src to this remote using server-side copy operations.
2015-10-05 05:08:31 +08:00
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
2019-06-17 16:34:30 +08:00
func ( f * Fs ) Copy ( ctx context . Context , src fs . Object , remote string ) ( fs . Object , error ) {
2015-10-30 16:40:14 +08:00
srcObj , ok := src . ( * Object )
if ! ok {
2017-02-09 19:01:20 +08:00
fs . Debugf ( src , "Can't copy - not same remote type" )
2015-10-30 16:40:14 +08:00
return nil , fs . ErrorCantCopy
}
2021-01-05 21:26:00 +08:00
if f . driveType != srcObj . fs . driveType {
2021-01-06 02:57:51 +08:00
fs . Debugf ( src , "Can't server-side copy - drive types differ" )
return nil , fs . ErrorCantCopy
}
// For OneDrive Business, this is only supported within the same drive
if f . driveType != driveTypePersonal && srcObj . fs . driveID != f . driveID {
fs . Debugf ( src , "Can't server-side copy - cross-drive but not OneDrive Personal" )
2021-01-05 21:26:00 +08:00
return nil , fs . ErrorCantCopy
}
2019-06-17 16:34:30 +08:00
err := srcObj . readMetaData ( ctx )
2015-10-30 16:40:14 +08:00
if err != nil {
return nil , err
}
2020-03-15 20:07:46 +08:00
// Check we aren't overwriting a file on the same remote
if srcObj . fs == f {
srcPath := srcObj . rootPath ( )
dstPath := f . rootPath ( remote )
if strings . ToLower ( srcPath ) == strings . ToLower ( dstPath ) {
return nil , errors . Errorf ( "can't copy %q -> %q as are same name when lowercase" , srcPath , dstPath )
}
2017-02-23 03:28:22 +08:00
}
2015-10-30 16:40:14 +08:00
// Create temporary object
2019-06-17 16:34:30 +08:00
dstObj , leaf , directoryID , err := f . createObject ( ctx , remote , srcObj . modTime , srcObj . size )
2015-10-30 16:40:14 +08:00
if err != nil {
return nil , err
}
// Copy the object
2021-01-06 10:50:37 +08:00
// The query param is a workaround for OneDrive Business for #4590
opts := newOptsCall ( srcObj . id , "POST" , "/copy?@microsoft.graph.conflictBehavior=replace" )
2018-07-12 01:48:59 +08:00
opts . ExtraHeaders = map [ string ] string { "Prefer" : "respond-async" }
opts . NoResponse = true
2019-01-09 13:11:00 +08:00
id , dstDriveID , _ := parseNormalizedID ( directoryID )
2018-07-12 01:48:59 +08:00
2020-01-15 01:33:35 +08:00
replacedLeaf := f . opt . Enc . FromStandardName ( leaf )
2018-08-04 18:16:43 +08:00
copyReq := api . CopyItemRequest {
2015-10-30 16:40:14 +08:00
Name : & replacedLeaf ,
ParentReference : api . ItemReference {
2019-01-09 13:11:00 +08:00
DriveID : dstDriveID ,
2018-08-18 18:06:22 +08:00
ID : id ,
2015-10-30 16:40:14 +08:00
} ,
}
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = f . srv . CallJSON ( ctx , & opts , & copyReq , nil )
2015-10-30 16:40:14 +08:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
// read location header
location := resp . Header . Get ( "Location" )
if location == "" {
2016-06-12 22:06:02 +08:00
return nil , errors . New ( "didn't receive location header in copy response" )
2015-10-30 16:40:14 +08:00
}
// Wait for job to finish
2019-06-17 16:34:30 +08:00
err = f . waitForJob ( ctx , location , dstObj )
2021-01-05 21:26:00 +08:00
if err == errAsyncJobAccessDenied {
fs . Debugf ( src , "Server-side copy failed - file not shared between drives" )
return nil , fs . ErrorCantCopy
}
2015-10-30 16:40:14 +08:00
if err != nil {
return nil , err
}
2018-03-15 15:06:17 +08:00
2018-03-15 15:06:17 +08:00
// Copy does NOT copy the modTime from the source and there seems to
// be no way to set date before
// This will create TWO versions on OneDrive
2019-06-17 16:34:30 +08:00
err = dstObj . SetModTime ( ctx , srcObj . ModTime ( ctx ) )
2018-03-15 15:06:17 +08:00
if err != nil {
return nil , err
}
2015-10-30 16:40:14 +08:00
return dstObj , nil
}
2015-10-05 05:08:31 +08:00
2020-06-05 05:25:14 +08:00
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	// check=false deletes the directory even if it has contents
	return f.purgeCheck(ctx, dir, false)
}
2020-10-14 05:43:40 +08:00
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// Create temporary object (also ensures the destination directory exists)
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}
	id, dstDriveID, _ := parseNormalizedID(directoryID)
	_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
	if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		fs.Debugf(f, "Can't move files between drives (%q != %q)", dstDriveID, srcObjDriveID)
		return nil, fs.ErrorCantMove
	}
	// Move the object by PATCHing its name and parent reference
	opts := newOptsCall(srcObj.id, "PATCH", "")
	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(leaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      id,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(srcObj.modTime),
			LastModifiedDateTime: api.Timestamp(srcObj.modTime),
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	// Populate the destination object from the response
	err = dstObj.setMetaData(&info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}
2018-08-03 00:13:37 +08:00
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// dirCache.DirMove does the existence checks and returns the IDs
	// needed to issue the server-side move
	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
	if err != nil {
		return err
	}
	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
	_, srcDriveID, _ := parseNormalizedID(srcID)
	if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		fs.Debugf(f, "Can't move directories between drives (%q != %q)", dstDriveID, srcDriveID)
		return fs.ErrorCantDirMove
	}
	// Get timestamps of src so they can be preserved
	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
	if err != nil {
		return err
	}
	// Do the move by PATCHing the directory's name and parent reference
	opts := newOptsCall(srcID, "PATCH", "")
	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(dstLeaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      parsedDstDirID,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      srcInfo.CreatedDateTime,
			LastModifiedDateTime: srcInfo.LastModifiedDateTime,
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	// Invalidate the moved directory's cache entries
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
2016-12-09 23:39:29 +08:00
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
2018-04-17 05:19:25 +08:00
// About gets quota information
2019-06-17 16:34:30 +08:00
func ( f * Fs ) About ( ctx context . Context ) ( usage * fs . Usage , err error ) {
2018-04-17 05:19:25 +08:00
var drive api . Drive
opts := rest . Opts {
Method : "GET" ,
Path : "" ,
}
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = f . srv . CallJSON ( ctx , & opts , nil , & drive )
2018-04-17 05:19:25 +08:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , errors . Wrap ( err , "about failed" )
}
q := drive . Quota
2020-10-07 17:58:04 +08:00
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q . Total == 0 && q . Used == 0 && q . Deleted == 0 && q . Remaining == 0 {
return & fs . Usage { } , nil
}
2018-04-17 05:19:25 +08:00
usage = & fs . Usage {
Total : fs . NewUsageValue ( q . Total ) , // quota of bytes that can be used
Used : fs . NewUsageValue ( q . Used ) , // bytes in use
Trashed : fs . NewUsageValue ( q . Deleted ) , // bytes in trash
Free : fs . NewUsageValue ( q . Remaining ) , // bytes which can be uploaded before reaching the quota
}
return usage , nil
}
2016-01-11 20:39:33 +08:00
// Hashes returns the supported hash sets.
2018-01-13 00:30:54 +08:00
func ( f * Fs ) Hashes ( ) hash . Set {
2018-08-18 18:06:22 +08:00
if f . driveType == driveTypePersonal {
return hash . Set ( hash . SHA1 )
2018-04-20 19:55:49 +08:00
}
2019-09-23 21:32:36 +08:00
return hash . Set ( QuickXorHashType )
2016-01-11 20:39:33 +08:00
}
2020-05-20 18:39:20 +08:00
// PublicLink returns a link for downloading without account.
2020-06-01 05:18:01 +08:00
func ( f * Fs ) PublicLink ( ctx context . Context , remote string , expire fs . Duration , unlink bool ) ( link string , err error ) {
2018-11-02 20:14:19 +08:00
info , _ , err := f . readMetaDataForPath ( ctx , f . rootPath ( remote ) )
2018-10-09 20:11:48 +08:00
if err != nil {
return "" , err
}
2019-01-09 13:11:00 +08:00
opts := newOptsCall ( info . GetID ( ) , "POST" , "/createLink" )
2018-10-09 20:11:48 +08:00
share := api . CreateShareLinkRequest {
Type : "view" ,
Scope : "anonymous" ,
}
var resp * http . Response
var result api . CreateShareLinkResponse
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = f . srv . CallJSON ( ctx , & opts , & share , & result )
2018-10-09 20:11:48 +08:00
return shouldRetry ( resp , err )
} )
if err != nil {
fmt . Println ( err )
return "" , err
}
return result . Link . WebURL , nil
}
2020-08-04 03:45:37 +08:00
// CleanUp deletes all the hidden files.
//
// It walks the whole remote and removes old versions of every file,
// deleting up to f.ci.Checkers files concurrently.
func (f *Fs) CleanUp(ctx context.Context) error {
	// token is a semaphore bounding the number of concurrent deletions
	token := make(chan struct{}, f.ci.Checkers)
	var wg sync.WaitGroup
	err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
		err = entries.ForObjectError(func(obj fs.Object) error {
			o, ok := obj.(*Object)
			if !ok {
				return errors.New("internal error: not a onedrive object")
			}
			wg.Add(1)
			// Acquire a slot before starting the goroutine so at most
			// f.ci.Checkers deletions run at once
			token <- struct{}{}
			go func() {
				defer func() {
					<-token
					wg.Done()
				}()
				// Best effort: failures are logged, not returned
				err := o.deleteVersions(ctx)
				if err != nil {
					fs.Errorf(o, "Failed to remove versions: %v", err)
				}
			}()
			return nil
		})
		// Wait for this directory's deletions before moving on
		wg.Wait()
		return err
	})
	return err
}
// Finds and removes any old versions for o
func (o *Object) deleteVersions(ctx context.Context) error {
	opts := newOptsCall(o.id, "GET", "/versions")
	var versions api.VersionsResponse
	if err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
		return shouldRetry(resp, err)
	}); err != nil {
		return err
	}
	// The first entry is the current version - keep it, delete the rest
	if len(versions.Versions) < 2 {
		return nil
	}
	for _, v := range versions.Versions[1:] {
		if err := o.deleteVersion(ctx, v.ID); err != nil {
			return err
		}
	}
	return nil
}
// deleteVersion removes the single version with the given ID from o.
//
// (The previous comment here was a copy-paste of deleteVersions' -
// this function deletes one version, it doesn't find them.)
//
// Respects --dry-run / --interactive via operations.SkipDestructive.
func (o *Object) deleteVersion(ctx context.Context, ID string) error {
	if operations.SkipDestructive(ctx, fmt.Sprintf("%s of %s", ID, o.remote), "delete version") {
		return nil
	}
	fs.Infof(o, "removing version %q", ID)
	opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
	opts.NoResponse = true
	return o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
}
2015-10-05 05:08:31 +08:00
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
// Return a string version
func (o *Object) String() string {
	// Guard against a nil receiver so logging a nil object is safe
	if o != nil {
		return o.remote
	}
	return "<nil>"
}
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
2018-11-02 20:14:19 +08:00
// rootPath returns a path for use in server given a remote
func (f *Fs) rootPath(remote string) string {
	// Note: unlike srvPath this does NOT apply the name encoding
	return f.rootSlash() + remote
}
// rootPath returns a path for use in local functions
func (o *Object) rootPath() string {
	return o.fs.rootPath(o.remote)
}
2018-10-14 21:17:53 +08:00
// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
	// Encode the path so names with characters special to OneDrive
	// survive the round trip
	return f.opt.Enc.FromStandardPath(f.rootSlash() + remote)
}
2015-10-05 05:08:31 +08:00
// srvPath returns a path for use in server
func (o *Object) srvPath() string {
	return o.fs.srvPath(o.remote)
}
2016-01-11 20:39:33 +08:00
// Hash returns the SHA-1 of an object returning a lowercase hex string
2019-06-17 16:34:30 +08:00
func ( o * Object ) Hash ( ctx context . Context , t hash . Type ) ( string , error ) {
2018-08-18 18:06:22 +08:00
if o . fs . driveType == driveTypePersonal {
if t == hash . SHA1 {
return o . sha1 , nil
}
} else {
2019-09-23 21:32:36 +08:00
if t == QuickXorHashType {
2018-08-18 18:06:22 +08:00
return o . quickxorhash , nil
2018-04-20 19:55:49 +08:00
}
2016-01-11 20:39:33 +08:00
}
2018-08-18 18:06:22 +08:00
return "" , hash . ErrUnsupported
2015-10-05 05:08:31 +08:00
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	// NOTE(review): the fs.Object interface gives Size no context, so
	// context.TODO() is used here; a metadata fetch may happen without
	// cancellation support.
	err := o.readMetaData(context.TODO())
	if err != nil {
		// Log and return 0 rather than failing - Size has no error return
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
	}
	return o.size
}
// setMetaData sets the metadata from info
//
// It fills in size, hashes, mime type, mod time and ID, and marks the
// object as having metadata. Returns fs.ErrorNotAFile (wrapped) if
// info describes a folder.
func (o *Object) setMetaData(info *api.Item) (err error) {
	if info.GetFolder() != nil {
		return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
	}
	o.hasMetaData = true
	o.size = info.GetSize()

	// OneNote items are packages, not downloadable files
	o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote

	// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
	//
	// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
	file := info.GetFile()
	if file != nil {
		o.mimeType = file.MimeType
		if file.Hashes.Sha1Hash != "" {
			// Normalize to lowercase hex to match rclone convention
			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
		}
		if file.Hashes.QuickXorHash != "" {
			// The API returns QuickXorHash base64 encoded; rclone
			// stores it as lowercase hex
			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
			if err != nil {
				// A bad hash isn't fatal - log and carry on without it
				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
			} else {
				o.quickxorhash = hex.EncodeToString(h)
			}
		}
	}
	// Prefer the client-set filesystem times; fall back to the
	// server-side last modified time
	fileSystemInfo := info.GetFileSystemInfo()
	if fileSystemInfo != nil {
		o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.GetLastModifiedDateTime())
	}
	o.id = info.GetID()
	return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
2019-06-17 16:34:30 +08:00
func ( o * Object ) readMetaData ( ctx context . Context ) ( err error ) {
2015-10-05 05:08:31 +08:00
if o . hasMetaData {
return nil
}
2018-11-02 20:14:19 +08:00
info , _ , err := o . fs . readMetaDataForPath ( ctx , o . rootPath ( ) )
2015-10-05 05:08:31 +08:00
if err != nil {
2016-06-26 04:23:20 +08:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "itemNotFound" {
return fs . ErrorObjectNotFound
}
}
2015-10-05 05:08:31 +08:00
return err
}
2017-03-07 04:11:54 +08:00
return o . setMetaData ( info )
2015-10-05 05:08:31 +08:00
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
2019-06-17 16:34:30 +08:00
func ( o * Object ) ModTime ( ctx context . Context ) time . Time {
err := o . readMetaData ( ctx )
2015-10-05 05:08:31 +08:00
if err != nil {
2017-02-09 19:01:20 +08:00
fs . Logf ( o , "Failed to read metadata: %v" , err )
2015-10-05 05:08:31 +08:00
return time . Now ( )
}
return o . modTime
}
// setModTime sets the modification time of the local fs object
//
// It PATCHes the item's FileSystemInfo and returns the updated item.
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
	var opts rest.Opts
	// NOTE(review): the FindPath error is deliberately(?) ignored -
	// on failure directoryID is "", drive parses as "" and the
	// path-addressed fallback below is used. Confirm this is intended.
	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
	if drive != "" {
		// ID-addressed form (used when the parent is on another drive)
		opts = rest.Opts{
			Method:  "PATCH",
			RootURL: rootURL,
			Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
		}
	} else {
		// Path-addressed form relative to the drive root
		opts = rest.Opts{
			Method: "PATCH",
			Path:   "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
		}
	}
	update := api.SetFileSystemInfo{
		FileSystemInfo: api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(modTime),
			LastModifiedDateTime: api.Timestamp(modTime),
		},
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
		return shouldRetry(resp, err)
	})
	// Remove versions if required (setting the mod time creates a new
	// version on OneDrive)
	if o.fs.opt.NoVersions {
		err := o.deleteVersions(ctx)
		if err != nil {
			fs.Errorf(o, "Failed to remove versions: %v", err)
		}
	}
	return info, err
}
// SetModTime sets the modification time of the local fs object
2019-06-17 16:34:30 +08:00
func ( o * Object ) SetModTime ( ctx context . Context , modTime time . Time ) error {
info , err := o . setModTime ( ctx , modTime )
2015-10-05 05:08:31 +08:00
if err != nil {
2016-03-22 23:07:10 +08:00
return err
2015-10-05 05:08:31 +08:00
}
2017-03-07 04:11:54 +08:00
return o . setMetaData ( info )
2015-10-05 05:08:31 +08:00
}
// Storable returns a boolean showing whether this object storable.
// OneDrive objects are always storable.
func (o *Object) Storable() bool {
	return true
}
// Open an object for read
2019-06-17 16:34:30 +08:00
func ( o * Object ) Open ( ctx context . Context , options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
2015-10-05 05:08:31 +08:00
if o . id == "" {
2016-06-12 22:06:02 +08:00
return nil , errors . New ( "can't download - no id" )
2015-10-05 05:08:31 +08:00
}
2018-10-03 12:46:25 +08:00
if o . isOneNoteFile {
return nil , errors . New ( "can't open a OneNote file" )
}
2018-01-23 01:05:00 +08:00
fs . FixRangeOption ( options , o . size )
2015-10-05 05:08:31 +08:00
var resp * http . Response
2018-07-12 01:48:59 +08:00
opts := newOptsCall ( o . id , "GET" , "/content" )
opts . Options = options
2015-10-05 05:08:31 +08:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = o . fs . srv . Call ( ctx , & opts )
2015-10-05 05:08:31 +08:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
2018-02-16 21:21:26 +08:00
if resp . StatusCode == http . StatusOK && resp . ContentLength > 0 && resp . Header . Get ( "Content-Range" ) == "" {
//Overwrite size with actual size since size readings from Onedrive is unreliable.
o . size = resp . ContentLength
}
2015-10-05 05:08:31 +08:00
return resp . Body , err
}
// createUploadSession creates an upload session for the object
2019-06-17 16:34:30 +08:00
func ( o * Object ) createUploadSession ( ctx context . Context , modTime time . Time ) ( response * api . CreateUploadResponse , err error ) {
leaf , directoryID , _ := o . fs . dirCache . FindPath ( ctx , o . remote , false )
2019-01-09 13:11:00 +08:00
id , drive , rootURL := parseNormalizedID ( directoryID )
2018-07-12 01:48:59 +08:00
var opts rest . Opts
if drive != "" {
opts = rest . Opts {
Method : "POST" ,
RootURL : rootURL ,
2018-11-02 20:14:19 +08:00
Path : fmt . Sprintf ( "/%s/items/%s:/%s:/createUploadSession" ,
2020-01-15 01:33:35 +08:00
drive , id , rest . URLPathEscape ( o . fs . opt . Enc . FromStandardName ( leaf ) ) ) ,
2018-07-12 01:48:59 +08:00
}
} else {
opts = rest . Opts {
Method : "POST" ,
2018-08-18 18:06:22 +08:00
Path : "/root:/" + rest . URLPathEscape ( o . srvPath ( ) ) + ":/createUploadSession" ,
2018-07-12 01:48:59 +08:00
}
2015-10-05 05:08:31 +08:00
}
2018-03-15 15:06:17 +08:00
createRequest := api . CreateUploadRequest { }
2018-03-17 03:18:51 +08:00
createRequest . Item . FileSystemInfo . CreatedDateTime = api . Timestamp ( modTime )
2018-03-15 15:06:17 +08:00
createRequest . Item . FileSystemInfo . LastModifiedDateTime = api . Timestamp ( modTime )
2015-10-05 05:08:31 +08:00
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = o . fs . srv . CallJSON ( ctx , & opts , & createRequest , & response )
2018-10-03 12:46:25 +08:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors . New ( err . Error ( ) + " (is it a OneNote file?)" )
}
}
2015-10-05 05:08:31 +08:00
return shouldRetry ( resp , err )
} )
2018-03-15 15:06:17 +08:00
return response , err
2015-10-05 05:08:31 +08:00
}
2020-01-25 18:41:20 +08:00
// getPosition gets the current position in a multipart upload
func ( o * Object ) getPosition ( ctx context . Context , url string ) ( pos int64 , err error ) {
2015-11-27 20:46:13 +08:00
opts := rest . Opts {
2020-01-25 18:41:20 +08:00
Method : "GET" ,
RootURL : url ,
}
var info api . UploadFragmentResponse
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err = o . fs . srv . CallJSON ( ctx , & opts , nil , & info )
return shouldRetry ( resp , err )
} )
if err != nil {
return 0 , err
}
if len ( info . NextExpectedRanges ) != 1 {
return 0 , errors . Errorf ( "bad number of ranges in upload position: %v" , info . NextExpectedRanges )
}
position := info . NextExpectedRanges [ 0 ]
i := strings . IndexByte ( position , '-' )
if i < 0 {
return 0 , errors . Errorf ( "no '-' in next expected range: %q" , position )
2015-10-05 05:08:31 +08:00
}
2020-01-25 18:41:20 +08:00
position = position [ : i ]
pos , err = strconv . ParseInt ( position , 10 , 64 )
if err != nil {
return 0 , errors . Wrapf ( err , "bad expected range: %q" , position )
}
return pos , nil
}
// uploadFragment uploads a part
//
// It PUTs [start, start+chunkSize) of a totalSize upload to the session
// URL. If the server answers 416 (range not satisfiable) it re-reads the
// server's current position and retries the chunk, skipping the bytes the
// server already has. On the final fragment the server replies 200/201
// with the completed item, which is decoded and returned; intermediate
// fragments return a nil info.
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
	// var response api.UploadFragmentResponse
	var resp *http.Response
	var body []byte
	// skip = number of bytes at the front of chunk the server already has;
	// adjusted after a 416 and persists across pacer retries.
	var skip = int64(0)
	err = o.fs.pacer.Call(func() (bool, error) {
		toSend := chunkSize - skip
		opts := rest.Opts{
			Method:        "PUT",
			RootURL:       url,
			ContentLength: &toSend,
			ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
			Body:          chunk,
			Options:       options,
		}
		// Rewind/advance the chunk so this attempt starts at the right byte.
		_, _ = chunk.Seek(skip, io.SeekStart)
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
			// 416: our idea of the position disagrees with the server's -
			// ask the server where it actually is and resync.
			fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
			pos, posErr := o.getPosition(ctx, url)
			if posErr != nil {
				fs.Debugf(o, "Failed to read position: %v", posErr)
				return false, posErr
			}
			skip = pos - start
			fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
			switch {
			case skip < 0:
				// Server expects data before this chunk - we can't rewind the input.
				return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
			case skip > chunkSize:
				// Server is ahead of this whole chunk - we can't fabricate data.
				return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
			case skip == chunkSize:
				// Server already has this entire chunk - nothing to send.
				fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
				return false, nil
			}
			// Retry this chunk, resending only the missing tail.
			return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
		}
		if err != nil {
			return shouldRetry(resp, err)
		}
		body, err = rest.ReadBody(resp)
		if err != nil {
			return shouldRetry(resp, err)
		}
		if resp.StatusCode == 200 || resp.StatusCode == 201 {
			// we are done :)
			// read the item
			info = &api.Item{}
			return false, json.Unmarshal(body, info)
		}
		return false, nil
	})
	return info, err
}
// cancelUploadSession cancels an upload session
2019-09-05 03:00:37 +08:00
func ( o * Object ) cancelUploadSession ( ctx context . Context , url string ) ( err error ) {
2015-11-27 20:46:13 +08:00
opts := rest . Opts {
2015-10-05 05:08:31 +08:00
Method : "DELETE" ,
2017-07-07 15:18:13 +08:00
RootURL : url ,
2015-10-05 05:08:31 +08:00
NoResponse : true ,
}
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = o . fs . srv . Call ( ctx , & opts )
2015-10-05 05:08:31 +08:00
return shouldRetry ( resp , err )
} )
return
}
// uploadMultipart uploads a file using multipart upload
2020-03-22 06:31:51 +08:00
func ( o * Object ) uploadMultipart ( ctx context . Context , in io . Reader , size int64 , modTime time . Time , options ... fs . OpenOption ) ( info * api . Item , err error ) {
2018-09-04 23:57:47 +08:00
if size <= 0 {
2018-11-02 20:12:22 +08:00
return nil , errors . New ( "unknown-sized upload not supported" )
2018-09-04 23:57:47 +08:00
}
2015-10-05 05:08:31 +08:00
// Create upload session
2017-02-09 19:01:20 +08:00
fs . Debugf ( o , "Starting multipart upload" )
2019-06-17 16:34:30 +08:00
session , err := o . createUploadSession ( ctx , modTime )
2015-10-05 05:08:31 +08:00
if err != nil {
2018-03-17 17:46:06 +08:00
return nil , err
2015-10-05 05:08:31 +08:00
}
uploadURL := session . UploadURL
2020-06-25 21:35:41 +08:00
// Cancel the session if something went wrong
defer atexit . OnError ( & err , func ( ) {
fs . Debugf ( o , "Cancelling multipart upload: %v" , err )
cancelErr := o . cancelUploadSession ( ctx , uploadURL )
if cancelErr != nil {
fs . Logf ( o , "Failed to cancel multipart upload: %v" , cancelErr )
2015-10-05 05:08:31 +08:00
}
2020-06-25 21:35:41 +08:00
} ) ( )
2015-10-05 05:08:31 +08:00
// Upload the chunks
remaining := size
position := int64 ( 0 )
for remaining > 0 {
2018-05-15 01:06:57 +08:00
n := int64 ( o . fs . opt . ChunkSize )
2015-10-05 05:08:31 +08:00
if remaining < n {
n = remaining
}
2018-01-13 00:30:54 +08:00
seg := readers . NewRepeatableReader ( io . LimitReader ( in , n ) )
2017-02-09 19:01:20 +08:00
fs . Debugf ( o , "Uploading segment %d/%d size %d" , position , size , n )
2020-03-22 06:31:51 +08:00
info , err = o . uploadFragment ( ctx , uploadURL , position , size , seg , n , options ... )
2015-10-05 05:08:31 +08:00
if err != nil {
2018-03-17 17:46:06 +08:00
return nil , err
2015-10-05 05:08:31 +08:00
}
remaining -= n
position += n
}
2018-03-17 17:46:06 +08:00
return info , nil
}
2018-09-04 23:57:47 +08:00
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
2020-03-22 06:31:51 +08:00
func ( o * Object ) uploadSinglepart ( ctx context . Context , in io . Reader , size int64 , modTime time . Time , options ... fs . OpenOption ) ( info * api . Item , err error ) {
2018-09-05 00:37:52 +08:00
if size < 0 || size > int64 ( fs . SizeSuffix ( 4 * 1024 * 1024 ) ) {
2018-11-02 20:12:22 +08:00
return nil , errors . New ( "size passed into uploadSinglepart must be >= 0 and <= 4MiB" )
2018-09-04 23:57:47 +08:00
}
fs . Debugf ( o , "Starting singlepart upload" )
var resp * http . Response
var opts rest . Opts
2019-06-17 16:34:30 +08:00
leaf , directoryID , _ := o . fs . dirCache . FindPath ( ctx , o . remote , false )
2019-01-09 13:11:00 +08:00
trueDirID , drive , rootURL := parseNormalizedID ( directoryID )
2018-09-04 23:57:47 +08:00
if drive != "" {
opts = rest . Opts {
Method : "PUT" ,
RootURL : rootURL ,
2020-01-15 01:33:35 +08:00
Path : "/" + drive + "/items/" + trueDirID + ":/" + rest . URLPathEscape ( o . fs . opt . Enc . FromStandardName ( leaf ) ) + ":/content" ,
2018-09-04 23:57:47 +08:00
ContentLength : & size ,
Body : in ,
2020-03-22 06:31:51 +08:00
Options : options ,
2018-09-04 23:57:47 +08:00
}
} else {
opts = rest . Opts {
Method : "PUT" ,
Path : "/root:/" + rest . URLPathEscape ( o . srvPath ( ) ) + ":/content" ,
ContentLength : & size ,
Body : in ,
2020-03-22 06:31:51 +08:00
Options : options ,
2018-09-04 23:57:47 +08:00
}
}
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-05 03:00:37 +08:00
resp , err = o . fs . srv . CallJSON ( ctx , & opts , nil , & info )
2018-10-03 12:46:25 +08:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors . New ( err . Error ( ) + " (is it a OneNote file?)" )
}
}
2018-09-04 23:57:47 +08:00
return shouldRetry ( resp , err )
} )
if err != nil {
return nil , err
}
err = o . setMetaData ( info )
if err != nil {
return nil , err
}
// Set the mod time now and read metadata
2019-06-17 16:34:30 +08:00
return o . setModTime ( ctx , modTime )
2018-09-04 23:57:47 +08:00
}
2015-10-05 05:08:31 +08:00
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
2019-06-17 16:34:30 +08:00
func ( o * Object ) Update ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( err error ) {
2018-10-03 12:46:25 +08:00
if o . hasMetaData && o . isOneNoteFile {
return errors . New ( "can't upload content to a OneNote file" )
}
2017-01-30 04:42:43 +08:00
o . fs . tokenRenewer . Start ( )
defer o . fs . tokenRenewer . Stop ( )
2016-02-18 19:35:25 +08:00
size := src . Size ( )
2019-06-17 16:34:30 +08:00
modTime := src . ModTime ( ctx )
2016-02-18 19:35:25 +08:00
2018-09-04 23:57:47 +08:00
var info * api . Item
if size > 0 {
2020-03-22 06:31:51 +08:00
info , err = o . uploadMultipart ( ctx , in , size , modTime , options ... )
2018-09-04 23:57:47 +08:00
} else if size == 0 {
2020-03-22 06:31:51 +08:00
info , err = o . uploadSinglepart ( ctx , in , size , modTime , options ... )
2018-09-04 23:57:47 +08:00
} else {
2019-02-02 16:37:33 +08:00
return errors . New ( "unknown-sized upload not supported" )
2018-09-04 23:57:47 +08:00
}
2015-10-05 05:08:31 +08:00
if err != nil {
return err
}
2018-09-04 23:57:47 +08:00
2020-08-07 00:59:26 +08:00
// If updating the file then remove versions
if o . fs . opt . NoVersions && o . hasMetaData {
err = o . deleteVersions ( ctx )
if err != nil {
fs . Errorf ( o , "Failed to remove versions: %v" , err )
}
}
2018-03-15 15:06:17 +08:00
return o . setMetaData ( info )
2015-10-05 05:08:31 +08:00
}
// Remove an object
2019-06-17 16:34:30 +08:00
func ( o * Object ) Remove ( ctx context . Context ) error {
2019-09-05 03:00:37 +08:00
return o . fs . deleteObject ( ctx , o . id )
2015-10-05 05:08:31 +08:00
}
2016-09-22 05:13:24 +08:00
// MimeType of an Object if known, "" otherwise
2019-06-17 16:34:30 +08:00
func ( o * Object ) MimeType ( ctx context . Context ) string {
2016-09-22 05:13:24 +08:00
return o . mimeType
}
2018-05-13 16:16:56 +08:00
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}
2019-01-09 13:11:00 +08:00
func newOptsCall ( normalizedID string , method string , route string ) ( opts rest . Opts ) {
id , drive , rootURL := parseNormalizedID ( normalizedID )
2018-07-12 01:48:59 +08:00
if drive != "" {
return rest . Opts {
Method : method ,
RootURL : rootURL ,
Path : "/" + drive + "/items/" + id + route ,
}
}
return rest . Opts {
Method : method ,
Path : "/items/" + id + route ,
}
}
2019-01-09 13:11:00 +08:00
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID ( ID string ) ( string , string , string ) {
2018-07-12 01:48:59 +08:00
if strings . Index ( ID , "#" ) >= 0 {
s := strings . Split ( ID , "#" )
2018-08-18 18:06:22 +08:00
return s [ 1 ] , s [ 0 ] , graphURL + "/drives"
2018-07-12 01:48:59 +08:00
}
return ID , "" , ""
}
2020-07-02 16:38:37 +08:00
// canonicalDriveID returns the canonical (lower-cased) form of the
// driveID, substituting the configured drive when driveID is empty.
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
	canonicalDriveID = driveID
	if canonicalDriveID == "" {
		canonicalDriveID = f.opt.DriveID
	}
	return strings.ToLower(canonicalDriveID)
}
2019-01-09 13:11:00 +08:00
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
//
// If target equals base the relative path is "" (previously this case
// sliced past the end of target and panicked).
func getRelativePathInsideBase(base, target string) (string, bool) {
	if base == "" {
		return target, true
	}
	if target == base {
		// target is base itself - relative path is empty
		return "", true
	}
	baseSlash := base + "/"
	if strings.HasPrefix(target, baseSlash) {
		return target[len(baseSlash):], true
	}
	return "", false
}
2019-03-11 16:30:38 +08:00
// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
	switch {
	case remotePath == "":
		return ""
	case strings.HasSuffix(remotePath, "/"):
		// Move the colon before the trailing slash: "a/b/" -> "a/b:/"
		return strings.TrimSuffix(remotePath, "/") + ":/"
	default:
		return remotePath + ":"
	}
}
2015-10-05 05:08:31 +08:00
// Check the interfaces are satisfied
var (
2018-08-19 23:22:51 +08:00
_ fs . Fs = ( * Fs ) ( nil )
_ fs . Purger = ( * Fs ) ( nil )
_ fs . Copier = ( * Fs ) ( nil )
_ fs . Mover = ( * Fs ) ( nil )
_ fs . DirMover = ( * Fs ) ( nil )
2016-12-09 23:39:29 +08:00
_ fs . DirCacheFlusher = ( * Fs ) ( nil )
2018-04-17 05:19:25 +08:00
_ fs . Abouter = ( * Fs ) ( nil )
2018-10-09 20:11:48 +08:00
_ fs . PublicLinker = ( * Fs ) ( nil )
2020-08-04 03:45:37 +08:00
_ fs . CleanUpper = ( * Fs ) ( nil )
2016-12-09 23:39:29 +08:00
_ fs . Object = ( * Object ) ( nil )
_ fs . MimeTyper = & Object { }
2018-05-13 16:16:56 +08:00
_ fs . IDer = & Object { }
2015-10-05 05:08:31 +08:00
)