// Package b2 provides an interface to the Backblaze B2 object storage system.
package b2

// FIXME should we remove sha1 checks from here as rclone now supports
// checking SHA1s?

import (
	"bufio"
	"bytes"
	"context"
	"crypto/sha1"
	"encoding/json"
	"errors"
	"fmt"
	gohash "hash"
	"io"
	"net/http"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/b2/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/multipart"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
	"github.com/rclone/rclone/lib/rest"
)

const (
	defaultEndpoint     = "https://api.backblazeb2.com"
	headerPrefix        = "x-bz-info-" // lower case as that is what the server returns
	timeKey             = "src_last_modified_millis"
	timeHeader          = headerPrefix + timeKey
	sha1Key             = "large_file_sha1"
	sha1Header          = "X-Bz-Content-Sha1"
	testModeHeader      = "X-Bz-Test-Mode"
	idHeader            = "X-Bz-File-Id"
	nameHeader          = "X-Bz-File-Name"
	timestampHeader     = "X-Bz-Upload-Timestamp"
	retryAfterHeader    = "Retry-After"
	minSleep            = 10 * time.Millisecond
	maxSleep            = 5 * time.Minute
	decayConstant       = 1 // bigger for slower decay, exponential
	maxParts            = 10000
	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
	minChunkSize        = 5 * fs.Mebi
	defaultChunkSize    = 96 * fs.Mebi
	defaultUploadCutoff = 200 * fs.Mebi
	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
	defaultMaxAge       = 24 * time.Hour
)

// Globals
var (
	errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
	errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "b2",
		Description: "Backblaze B2",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:      "account",
			Help:      "Account ID or Application Key ID.",
			Required:  true,
			Sensitive: true,
		}, {
			Name:      "key",
			Help:      "Application Key.",
			Required:  true,
			Sensitive: true,
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for the service.\n\nLeave blank normally.",
			Advanced: true,
		}, {
			Name: "test_mode",
			Help: `A flag string for X-Bz-Test-Mode header for debugging.

This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:

  * "fail_some_uploads"
  * "expire_some_account_authorization_tokens"
  * "force_cap_exceeded"

These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
			Default:  "",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "versions",
			Help:     "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "version_at",
			Help:     "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
			Default:  fs.Time{},
			Advanced: true,
		}, {
			Name:    "hard_delete",
			Help:    "Permanently delete files on remote removal, otherwise hide files.",
			Default: false,
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload.

Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657 GiB (== 5 GB).`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "copy_cutoff",
			Help: `Cutoff for switching to multipart copy.

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 4.6 GiB.`,
			Default:  largeFileCopyCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Upload chunk size.

When uploading large files, chunk the file into this size.

Must fit in memory. These chunks are buffered in memory and there
might be a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.`,
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
			Default:  4,
			Advanced: true,
		}, {
			Name: "disable_checksum",
			Help: `Disable checksums for large (> upload cutoff) files.

Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "download_url",
			Help: `Custom endpoint for downloads.

This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.

The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".

Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")`,
			Advanced: true,
		}, {
			Name: "download_auth_duration",
			Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.

This is used in combination with "rclone link" for making files
accessible to the public and sets the duration before the download
authorization token will expire.

The minimum value is 1 second. The maximum value is one week.`,
			Default:  fs.Duration(7 * 24 * time.Hour),
			Advanced: true,
		}, {
			Name:     "memory_pool_flush_time",
			Default:  fs.Duration(time.Minute),
			Advanced: true,
			Hide:     fs.OptionHideBoth,
			Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
		}, {
			Name:     "memory_pool_use_mmap",
			Default:  false,
			Advanced: true,
			Hide:     fs.OptionHideBoth,
			Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
		}, {
Name : "lifecycle" ,
Help : ` Set the number of days deleted files should be kept when creating a bucket .
On bucket creation , this parameter is used to create a lifecycle rule
for the entire bucket .
If lifecycle is 0 ( the default ) it does not create a lifecycle rule so
the default B2 behaviour applies . This is to create versions of files
on delete and overwrite and to keep them indefinitely .
If lifecycle is > 0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently . This is known as daysFromHidingToDeleting in the b2 docs .
The minimum value for this parameter is 1 day .
You can also enable hard_delete in the config also which will mean
deletions won ' t cause versions but overwrites will still cause
versions to be made .
See : [ rclone backend lifecycle ] ( # lifecycle ) for setting lifecycles after bucket creation .
` ,
Default : 0 ,
Advanced : true ,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// See: https://www.backblaze.com/b2/docs/files.html
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			// FIXME: allow /, but not leading, trailing or double
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeInvalidUtf8),
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Account                       string               `config:"account"`
	Key                           string               `config:"key"`
	Endpoint                      string               `config:"endpoint"`
	TestMode                      string               `config:"test_mode"`
	Versions                      bool                 `config:"versions"`
	VersionAt                     fs.Time              `config:"version_at"`
	HardDelete                    bool                 `config:"hard_delete"`
	UploadCutoff                  fs.SizeSuffix        `config:"upload_cutoff"`
	CopyCutoff                    fs.SizeSuffix        `config:"copy_cutoff"`
	ChunkSize                     fs.SizeSuffix        `config:"chunk_size"`
	UploadConcurrency             int                  `config:"upload_concurrency"`
	DisableCheckSum               bool                 `config:"disable_checksum"`
	DownloadURL                   string               `config:"download_url"`
	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
	Lifecycle                     int                  `config:"lifecycle"`
	Enc                           encoder.MultiEncoder `config:"encoding"`
}
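
// A matching rclone.conf section for this backend might look like this
// (a sketch; the remote name and credential values are placeholders):
//
//	[myb2]
//	type = b2
//	account = 0123456789ab
//	key = 0123456789abcdef0123456789abcdef0123456789
//	hard_delete = true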

// Fs represents a remote b2 server
type Fs struct {
	name            string                                 // name of this remote
	root            string                                 // the path we are working on if any
	opt             Options                                // parsed config options
	ci              *fs.ConfigInfo                         // global config
	features        *fs.Features                           // optional features
	srv             *rest.Client                           // the connection to the b2 server
	rootBucket      string                                 // bucket part of root (if any)
	rootDirectory   string                                 // directory part of root (if any)
	cache           *bucket.Cache                          // cache for bucket creation status
	bucketIDMutex   sync.Mutex                             // mutex to protect _bucketID
	_bucketID       map[string]string                      // the ID of the bucket we are working on
	bucketTypeMutex sync.Mutex                             // mutex to protect _bucketType
	_bucketType     map[string]string                      // the Type of the bucket we are working on
	info            api.AuthorizeAccountResponse           // result of authorize call
	uploadMu        sync.Mutex                             // lock for upload variable
	uploads         map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
	authMu          sync.Mutex                             // lock for authorizing the account
	pacer           *fs.Pacer                              // To pace and retry the API calls
	uploadToken     *pacer.TokenDispenser                  // control concurrency
}

// Object describes a b2 object
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	id       string    // b2 id of the file
	modTime  time.Time // The modified time of the object if known
	sha1     string    // SHA-1 hash if known
	size     int64     // Size of the object
	mimeType string    // Content-Type of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "B2 root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("B2 bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
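
// For example, with f.root == "mybucket/dir" (a sketch; the names are
// illustrative only):
//
//	f.split("file.txt") // -> ("mybucket", "dir/file.txt")
//	f.split("")         // -> ("mybucket", "dir")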

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}

// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// For 429 or 503 errors look at the Retry-After: header and
	// set the retry appropriately, starting with a minimum of 1
	// second if it isn't set.
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
		var retryAfter = 1
		retryAfterString := resp.Header.Get(retryAfterHeader)
		if retryAfterString != "" {
			var err error
			retryAfter, err = strconv.Atoi(retryAfterString)
			if err != nil {
				fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
			}
		}
		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if resp != nil && resp.StatusCode == 401 {
		fs.Debugf(f, "Unauthorized: %v", err)
		// Reauth
		authErr := f.authorizeAccount(ctx)
		if authErr != nil {
			err = authErr
		}
		return true, err
	}
	return f.shouldRetryNoReauth(ctx, resp, err)
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	body, err := rest.ReadBody(resp)
	if err != nil {
		fs.Errorf(nil, "Couldn't read error out of body: %v", err)
		body = nil
	}
	// Decode error response if there was one - they can be blank
	errResponse := new(api.Error)
	if len(body) > 0 {
		err = json.Unmarshal(body, errResponse)
		if err != nil {
			fs.Errorf(nil, "Couldn't decode error response: %v", err)
		}
	}
	if errResponse.Code == "" {
		errResponse.Code = "unknown"
	}
	if errResponse.Status == 0 {
		errResponse.Status = resp.StatusCode
	}
	if errResponse.Message == "" {
		errResponse.Message = "Unknown " + resp.Status
	}
	return errResponse
}
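
// For reference, a B2 error body that errorHandler decodes typically looks
// something like this (field values are illustrative):
//
//	{
//	    "status": 401,
//	    "code": "expired_auth_token",
//	    "message": "Authorization token has expired"
//	}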

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
	if cs < opt.ChunkSize {
		return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(&f.opt, cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
	}
	return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.UploadCutoff < opt.ChunkSize {
		opt.UploadCutoff = opt.ChunkSize
		fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
	}
	err = checkUploadCutoff(opt, opt.UploadCutoff)
	if err != nil {
		return nil, fmt.Errorf("b2: upload cutoff: %w", err)
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("b2: chunk size: %w", err)
	}
	if opt.Account == "" {
		return nil, errors.New("account not found")
	}
	if opt.Key == "" {
		return nil, errors.New("key not found")
	}
	if opt.Endpoint == "" {
		opt.Endpoint = defaultEndpoint
	}
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:        name,
		opt:         *opt,
		ci:          ci,
		srv:         rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
		cache:       bucket.NewCache(),
		_bucketID:   make(map[string]string, 1),
		_bucketType: make(map[string]string, 1),
		uploads:     make(map[string][]*api.GetUploadURLResponse),
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:          true,
		WriteMimeType:         true,
		BucketBased:           true,
		BucketBasedRootOK:     true,
		ChunkWriterDoesntSeek: true,
	}).Fill(ctx, f)
	// Set the test flag if required
	if opt.TestMode != "" {
		testMode := strings.TrimSpace(opt.TestMode)
		f.srv.SetHeader(testModeHeader, testMode)
		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
	}
	err = f.authorizeAccount(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to authorize account: %w", err)
	}
	// If this is a key limited to a single bucket, it must exist already
	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
		if allowedBucket == "" {
			return nil, errors.New("bucket that application key is restricted to no longer exists")
		}
		if allowedBucket != f.rootBucket {
			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
		}
		f.cache.MarkOK(f.rootBucket)
		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
	}
	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			// File doesn't exist so return old f
			f.setRoot(oldRoot)
			return f, nil
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
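
// NewFs is normally reached through the registry rather than called
// directly, e.g. (a sketch; "myb2" is a placeholder remote name):
//
//	f, err := fs.NewFs(ctx, "myb2:bucket/path")
//	if err != nil {
//	    // handle error
//	}
//	entries, err := f.List(ctx, "")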

// authorizeAccount gets the API endpoint and auth token.  Can be used
// for reauthentication too.
func (f *Fs) authorizeAccount(ctx context.Context) error {
	f.authMu.Lock()
	defer f.authMu.Unlock()
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/b2api/v1/b2_authorize_account",
		RootURL:      f.opt.Endpoint,
		UserName:     f.opt.Account,
		Password:     f.opt.Key,
		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
		return f.shouldRetryNoReauth(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("failed to authenticate: %w", err)
	}
	f.srv.SetRoot(f.info.APIURL + "/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
	return nil
}
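
// For reference, a truncated, illustrative b2_authorize_account response
// of the sort that gets decoded into f.info looks something like:
//
//	{
//	    "accountId": "0123456789ab",
//	    "apiUrl": "https://api001.backblazeb2.com",
//	    "authorizationToken": "4_0022...",
//	    "downloadUrl": "https://f001.backblazeb2.com",
//	    "allowed": {"bucketId": null, "capabilities": ["listBuckets", "readFiles"]}
//	}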

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
	for _, capability := range f.info.Allowed.Capabilities {
		if capability == permission {
			return true
		}
	}
	return false
}
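
// For example (a sketch; capability names follow the B2 API, e.g.
// "deleteFiles" or "writeFiles"):
//
//	if !f.hasPermission("deleteFiles") {
//	    return errors.New("application key can't delete files")
//	}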

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
	f.uploadMu.Lock()
	defer f.uploadMu.Unlock()
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return nil, err
	}
	// look for a stored upload URL for the correct bucketID
	uploads := f.uploads[bucketID]
	if len(uploads) > 0 {
		upload, uploads = uploads[0], uploads[1:]
		f.uploads[bucketID] = uploads
		return upload, nil
	}
	// get a new upload URL since not found
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_get_upload_url",
	}
	var request = api.GetUploadURLRequest{
		BucketID: bucketID,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get upload URL: %w", err)
	}
	return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
	if upload == nil {
		return
	}
	f.uploadMu.Lock()
	f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
	f.uploadMu.Unlock()
}
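
// The intended check-out/check-in pattern for upload URLs looks like this
// (a sketch of how the upload code uses these helpers):
//
//	upload, err := f.getUploadURL(ctx, bucket)
//	if err != nil {
//	    return err
//	}
//	// ... POST the file to upload.UploadURL using
//	// upload.AuthorizationToken ...
//	f.returnUploadURL(upload) // recycle the URL on success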

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (f *Fs) clearUploadURL(bucketID string) {
	f.uploadMu.Lock()
	delete(f.uploads, bucketID)
	f.uploadMu.Unlock()
}

// getRW gets a RW buffer and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
	f.uploadToken.Get()
	if !noBuf {
		rw = multipart.NewRW()
	}
	return rw
}

// putRW returns a RW buffer to the memory pool and returns an upload
// token
//
// If rw is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
	if rw != nil {
		_ = rw.Close()
	}
	f.uploadToken.Put()
}
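
// getRW and putRW are designed to be paired, e.g. (a sketch):
//
//	rw := f.getRW(false)
//	defer f.putRW(rw)
//	// ... read a chunk into rw, then upload it ...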

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object
type listFn func(remote string, object *api.File, isDirectory bool) error

// errEndList is a sentinel used to end the list iteration now.
// listFn should return it to end the iteration with no errors.
var errEndList = errors.New("end list")
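
// For example, a caller that only needs the first match can stop the
// listing early (a sketch; see list below for the full parameter list):
//
//	err := f.list(ctx, bucket, directory, prefix, false, false, 1, false, false,
//	    func(remote string, object *api.File, isDirectory bool) error {
//	        // ... record the entry ...
//	        return errEndList // terminates the iteration with a nil error
//	    })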

// list lists the objects into the function supplied from
// the bucket and root supplied
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names.
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated.
//
// If recurse is set the function will recursively list.
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000).
//
// If hidden is set then it will list the hidden (deleted) files too.
//
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
	if !findFile {
		if prefix != "" {
			prefix += "/"
		}
		if directory != "" {
			directory += "/"
		}
	}
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return err
	}
	chunkSize := 1000
	if limit > 0 {
		chunkSize = limit
	}
	var request = api.ListFileNamesRequest{
		BucketID:     bucketID,
		MaxFileCount: chunkSize,
		Prefix:       f.opt.Enc.FromStandardPath(directory),
		Delimiter:    delimiter,
	}
	if directory != "" {
		request.StartFileName = f.opt.Enc.FromStandardPath(directory)
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_list_file_names",
	}
	if hidden || f.opt.VersionAt.IsSet() {
		opts.Path = "/b2_list_file_versions"
	}
	lastFileName := ""
	for {
		var response api.ListFileNamesResponse
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return err
		}
		for i := range response.Files {
			file := &response.Files[i]
			file.Name = f.opt.Enc.ToStandardPath(file.Name)
			// Finish if file name no longer has prefix
			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
				return nil
			}
			if !strings.HasPrefix(file.Name, prefix) {
				fs.Debugf(f, "Odd name received %q", file.Name)
				continue
			}
			remote := file.Name[len(prefix):]
			// Check for directory
			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
			if isDirectory && len(remote) > 1 {
				remote = remote[:len(remote)-1]
			}
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			if f.opt.VersionAt.IsSet() {
				if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
					// Ignore versions that were created after the specified time
					continue
				}
				if file.Name == lastFileName {
					// Ignore versions before the already returned version
					continue
				}
			}
			// Send object
			lastFileName = file.Name
			err = fn(remote, file, isDirectory)
			if err != nil {
				if err == errEndList {
					return nil
				}
				return err
			}
		}
		// end if no NextFileName
		if response.NextFileName == nil {
			break
		}
		request.StartFileName = *response.NextFileName
		if response.NextFileID != nil {
			request.StartFileID = *response.NextFileID
		}
	}
	return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{})
		return d, nil
	}
	if remote == *last {
		remote = object.UploadTimestamp.AddVersion(remote)
	} else {
		*last = remote
	}
	// hide objects represent deleted files which we don't list
	if object.Action == "hide" {
		return nil, nil
	}
	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}
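
// In --b2-versions mode older versions of a file share its name, so
// AddVersion disambiguates repeats by embedding the upload timestamp in
// the remote name, along these lines (illustrative output):
//
//	"file.txt"                        // current version
//	"file-v2022-08-28-192157-000.txt" // an older version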

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	last := ""
	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}

// listBuckets returns all the buckets as entries
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
		d := fs.NewDir(bucket.Name, time.Time{})
		entries = append(entries, d)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		last := ""
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}

// listBucketFn is called from listBucketsToFn to handle a bucket
type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
	var account = api.ListBucketsRequest{
		AccountID: f.info.AccountID,
		BucketID:  f.info.Allowed.BucketID,
	}
	if bucketName != "" && account.BucketID == "" {
		account.BucketName = f.opt.Enc.FromStandardName(bucketName)
	}

	var response api.ListBucketsResponse
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_list_buckets",
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	f.bucketIDMutex.Lock()
	f.bucketTypeMutex.Lock()
	f._bucketID = make(map[string]string, 1)
	f._bucketType = make(map[string]string, 1)
	for i := range response.Buckets {
		bucket := &response.Buckets[i]
		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
		f.cache.MarkOK(bucket.Name)
		f._bucketID[bucket.Name] = bucket.ID
		f._bucketType[bucket.Name] = bucket.Type
	}
	f.bucketTypeMutex.Unlock()
	f.bucketIDMutex.Unlock()
	for i := range response.Buckets {
		bucket := &response.Buckets[i]
		err = fn(bucket)
		if err != nil {
			return err
		}
	}
	return nil
}

// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic, allPrivate, or snapshot
func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType string, err error) {
	f.bucketTypeMutex.Lock()
	bucketType = f._bucketType[bucket]
	f.bucketTypeMutex.Unlock()
	if bucketType != "" {
		return bucketType, nil
	}
	err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
		// listBucketsToFn reads bucket Types
		return nil
	})
	f.bucketTypeMutex.Lock()
	bucketType = f._bucketType[bucket]
	f.bucketTypeMutex.Unlock()
	if bucketType == "" {
		err = fs.ErrorDirNotFound
	}
	return bucketType, err
}

// setBucketType sets the Type for the current bucket name
func (f *Fs) setBucketType(bucket string, Type string) {
	f.bucketTypeMutex.Lock()
	f._bucketType[bucket] = Type
	f.bucketTypeMutex.Unlock()
}

// clearBucketType clears the Type for the current bucket name
func (f *Fs) clearBucketType(bucket string) {
	f.bucketTypeMutex.Lock()
	delete(f._bucketType, bucket)
	f.bucketTypeMutex.Unlock()
}

// getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
	f.bucketIDMutex.Lock()
	bucketID = f._bucketID[bucket]
	f.bucketIDMutex.Unlock()
	if bucketID != "" {
		return bucketID, nil
	}
	err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
		// listBucketsToFn sets IDs
		return nil
	})
	f.bucketIDMutex.Lock()
	bucketID = f._bucketID[bucket]
	f.bucketIDMutex.Unlock()
	if bucketID == "" {
		err = fs.ErrorDirNotFound
	}
	return bucketID, err
}

// setBucketID sets the ID for the current bucket name
func (f *Fs) setBucketID(bucket, ID string) {
	f.bucketIDMutex.Lock()
	f._bucketID[bucket] = ID
	f.bucketIDMutex.Unlock()
}

// clearBucketID clears the ID for the current bucket name
func (f *Fs) clearBucketID(bucket string) {
	f.bucketIDMutex.Lock()
	delete(f._bucketID, bucket)
	f.bucketIDMutex.Unlock()
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	return f.cache.Create(bucket, func() error {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_create_bucket",
		}
		var request = api.CreateBucketRequest{
			AccountID: f.info.AccountID,
			Name:      f.opt.Enc.FromStandardName(bucket),
			Type:      "allPrivate",
		}
		if f.opt.Lifecycle > 0 {
			request.LifecycleRules = []api.LifecycleRule{{
				DaysFromHidingToDeleting: &f.opt.Lifecycle,
			}}
		}
		var response api.Bucket
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			if apiErr, ok := err.(*api.Error); ok {
				if apiErr.Code == "duplicate_bucket_name" {
					// Check this is our bucket - buckets are globally unique and this
					// might be someone else's.
					_, getBucketErr := f.getBucketID(ctx, bucket)
					if getBucketErr == nil {
						// found so it is our bucket
						return nil
					}
					if getBucketErr != fs.ErrorDirNotFound {
						fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
					}
				}
			}
			return fmt.Errorf("failed to create bucket: %w", err)
		}
		f.setBucketID(bucket, response.ID)
		f.setBucketType(bucket, response.Type)
		return nil
	}, nil)
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_delete_bucket",
		}
		bucketID, err := f.getBucketID(ctx, bucket)
		if err != nil {
			return err
		}
		var request = api.DeleteBucketRequest{
			ID:        bucketID,
			AccountID: f.info.AccountID,
		}
		var response api.Bucket
		err = f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return fmt.Errorf("failed to delete bucket: %w", err)
		}
		f.clearBucketID(bucket)
		f.clearBucketType(bucket)
		f.clearUploadURL(bucketID)
		return nil
	})
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// hide hides a file on the remote
func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_hide_file",
	}
	var request = api.HideFileRequest{
		BucketID: bucketID,
		Name:     f.opt.Enc.FromStandardPath(bucketPath),
	}
	var response api.File
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.Code == "already_hidden" {
				// sometimes eventual consistency causes this, so
				// ignore this error since it is harmless
				return nil
			}
		}
		return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
	}
	return nil
}

// deleteByID deletes a file version given Name and ID
func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_delete_file_version",
	}
	var request = api.DeleteFileRequest{
		ID:   ID,
		Name: f.opt.Enc.FromStandardPath(Name),
	}
	var response api.File
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("failed to delete %q: %w", Name, err)
	}
	return nil
}

// purge deletes all the files and directories
//
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
	bucket, directory := f.split(dir)
	if bucket == "" {
		return errors.New("can't purge from root")
	}
	var errReturn error
	var checkErrMutex sync.Mutex
	var checkErr = func(err error) {
		if err == nil {
			return
		}
		checkErrMutex.Lock()
		defer checkErrMutex.Unlock()
		if errReturn == nil {
			errReturn = err
		}
	}
	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
		return time.Since(time.Time(timestamp)) > maxAge
	}

	// Delete Config.Transfers in parallel
	toBeDeleted := make(chan *api.File, f.ci.Transfers)
	var wg sync.WaitGroup
	wg.Add(f.ci.Transfers)
	for i := 0; i < f.ci.Transfers; i++ {
		go func() {
			defer wg.Done()
			for object := range toBeDeleted {
				oi, err := f.newObjectWithInfo(ctx, object.Name, object)
				if err != nil {
					fs.Errorf(object.Name, "Can't create object %v", err)
					continue
				}
				tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
				err = f.deleteByID(ctx, object.ID, object.Name)
				checkErr(err)
				tr.Done(ctx, err)
			}
		}()
	}
	if oldOnly {
		if deleteHidden && deleteUnfinished {
			fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
		} else if deleteHidden {
			fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
		} else if deleteUnfinished {
			fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
		} else {
			fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
			return nil
		}
	} else {
		fs.Infof(f, "cleaning bucket %q of all files", bucket)
	}
	last := ""
	checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			oi, err := f.newObjectWithInfo(ctx, object.Name, object)
			if err != nil {
				fs.Errorf(object, "Can't create object %+v", err)
			}
			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
			if oldOnly && last != remote {
				// Check current version of the file
				if deleteHidden && object.Action == "hide" {
					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
					toBeDeleted <- object
				} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
					toBeDeleted <- object
				} else {
					fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
				}
			} else {
				fs.Debugf(remote, "Deleting (id %q)", object.ID)
				toBeDeleted <- object
			}
			last = remote
			tr.Done(ctx, nil)
		}
		return nil
	}))
	close(toBeDeleted)
	wg.Wait()
	if !oldOnly {
		checkErr(f.Rmdir(ctx, dir))
	}
	return errReturn
}

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purge(ctx, dir, false, false, false, defaultMaxAge)
}

// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
func (f *Fs) CleanUp(ctx context.Context) error {
	return f.purge(ctx, "", true, true, true, defaultMaxAge)
}

// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
	return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
}
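
// The backend commands defined below drive cleanUp, for example:
//
//	rclone backend cleanup -o max-age=7w b2:bucket    calls f.cleanUp(ctx, false, true, maxAge)
//	rclone backend cleanup-hidden b2:bucket           calls f.cleanUp(ctx, true, false, 0)
//
// See cleanupCommand and cleanupHiddenCommand below.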

// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
	if srcObj.size > int64(f.opt.CopyCutoff) {
		if newInfo == nil {
			newInfo, err = srcObj.getMetaData(ctx)
			if err != nil {
				return err
			}
		}
		up, err := f.newLargeUpload(ctx, dstObj, nil, srcObj, f.opt.CopyCutoff, true, newInfo)
		if err != nil {
			return err
		}
		err = up.Copy(ctx)
		if err != nil {
			return err
		}
		return dstObj.decodeMetaDataFileInfo(up.info)
	}

	dstBucket, dstPath := dstObj.split()
	err = f.makeBucket(ctx, dstBucket)
	if err != nil {
		return err
	}

	destBucketID, err := f.getBucketID(ctx, dstBucket)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_copy_file",
	}
	var request = api.CopyFileRequest{
		SourceID:     srcObj.id,
		Name:         f.opt.Enc.FromStandardPath(dstPath),
		DestBucketID: destBucketID,
	}
	if newInfo == nil {
		request.MetadataDirective = "COPY"
	} else {
		request.MetadataDirective = "REPLACE"
		request.ContentType = newInfo.ContentType
		request.Info = newInfo.Info
	}
	var response api.FileInfo
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	return dstObj.decodeMetaDataFileInfo(&response)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}
	err := f.copy(ctx, dstObj, srcObj, nil)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.SHA1)
}

// getDownloadAuthorization returns an authorization token for downloading
// without an account.
func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string) (authorization string, err error) {
	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 { // B2 allows at most one week (604800s)
		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
	}
	if !f.hasPermission("shareFiles") {
		return "", errors.New("sharing a file link requires the shareFiles permission")
	}
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return "", err
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_get_download_authorization",
	}
	var request = api.GetDownloadAuthorizationRequest{
		BucketID:               bucketID,
		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
		ValidDurationInSeconds: validDurationInSeconds,
	}
	var response api.GetDownloadAuthorizationResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", fmt.Errorf("failed to get download authorization: %w", err)
	}
	return response.AuthorizationToken, nil
}

// PublicLink returns a link for downloading without an account
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	bucket, bucketPath := f.split(remote)
	var RootURL string
	if f.opt.DownloadURL == "" {
		RootURL = f.info.DownloadURL
	} else {
		RootURL = f.opt.DownloadURL
	}
	_, err = f.NewObject(ctx, remote)
	if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
		// Remote may be a directory - it is valid if listing it finds anything
		err2 := f.list(ctx, bucket, bucketPath, f.rootDirectory, f.rootBucket == "", false, 1, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
			err = nil
			return nil
		})
		if err2 != nil {
			return "", err2
		}
	}
	if err != nil {
		return "", err
	}
	absPath := "/" + urlEncode(bucketPath)
	link = RootURL + "/file/" + urlEncode(bucket) + absPath
	bucketType, err := f.getbucketType(ctx, bucket)
	if err != nil {
		return "", err
	}
	if bucketType == "allPrivate" || bucketType == "snapshot" {
		AuthorizationToken, err := f.getDownloadAuthorization(ctx, bucket, remote)
		if err != nil {
			return "", err
		}
		link += "?Authorization=" + AuthorizationToken
	}
	return link, nil
}
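
// The link produced above looks something like this (the download host
// varies by account and is illustrative here):
//
//	https://f002.backblazeb2.com/file/mybucket/path/to/file.txt
//
// with "?Authorization=<token>" appended for private buckets.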

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA1 {
		return "", hash.ErrUnsupported
	}
	if o.sha1 == "" {
		// Error is logged in readMetaData
		err := o.readMetaData(ctx)
		if err != nil {
			return "", err
		}
	}
	return o.sha1, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Clean the SHA1
//
// Make sure it is lower case.
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) string {
	const unverified = "unverified:"
	return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}
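
// For example (da39a3... is the SHA-1 of the empty string):
//
//	cleanSHA1("unverified:DA39A3EE5E6B4B0D3255BFEF95601890AFD80709")
//	// returns "da39a3ee5e6b4b0d3255bfef95601890afd80709"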

// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
//
//	o.id
//	o.modTime
//	o.size
//	o.sha1
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
	o.id = ID
	o.sha1 = SHA1
	o.mimeType = mimeType
	// Read SHA1 from metadata if it exists and isn't set
	if o.sha1 == "" || o.sha1 == "none" {
		o.sha1 = Info[sha1Key]
	}
	o.sha1 = cleanSHA1(o.sha1)
	o.size = Size
	// Use the UploadTimestamp if we can't get a mod time from the file info
	o.modTime = time.Time(UploadTimestamp)
	return o.parseTimeString(Info[timeKey])
}

// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
//
//	o.id
//	o.modTime
//	o.size
//	o.sha1
func (o *Object) decodeMetaData(info *api.File) (err error) {
	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
//
//	o.id
//	o.modTime
//	o.size
//	o.sha1
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the class B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
	bucket, bucketPath := o.split()
	maxSearched := 1
	var timestamp api.Timestamp
	if o.fs.opt.Versions {
		timestamp, bucketPath = api.RemoveVersion(bucketPath)
		maxSearched = maxVersions
	}

	err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxSearched, o.fs.opt.Versions, true, func(remote string, object *api.File, isDirectory bool) error {
		if isDirectory {
			return nil
		}
		if remote == bucketPath {
			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
				return nil
			}
			info = object
		}
		return errEndList // read only 1 item
	})
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}
	if info == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return info, nil
}

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
	// If using versions and we have a version suffix, we need to list the
	// directory to find the correct version
	if o.fs.opt.Versions {
		timestamp, _ := api.RemoveVersion(o.remote)
		if !timestamp.IsZero() {
			return o.getMetaDataListing(ctx)
		}
	}
	_, info, err = o.getOrHead(ctx, "HEAD", nil)
	return info, err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
//
//	o.id
//	o.modTime
//	o.size
//	o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.id != "" {
		return nil
	}
	info, err := o.getMetaData(ctx)
	if err != nil {
		return err
	}
	return o.decodeMetaData(info)
}

// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString(modTime time.Time) string {
	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
func (o *Object) parseTimeString(timeString string) (err error) {
	if timeString == "" {
		return nil
	}
	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
	if err != nil {
		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
		return nil
	}
	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
	return nil
}
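
// These two functions round trip, for example:
//
//	timeString(time.Date(2001, 9, 9, 1, 46, 40, 0, time.UTC)) // "1000000000000"
//	o.parseTimeString("1000000000000") // sets o.modTime to 2001-09-09T01:46:40Z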

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
//
// SHA-1 will also be updated once the request has completed.
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
	// The error is logged in readMetaData
	_ = o.readMetaData(ctx)
	return o.modTime
}

// SetModTime sets the modification time of the Object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	info, err := o.getMetaData(ctx)
	if err != nil {
		return err
	}
	info.Info[timeKey] = timeString(modTime)
	// Copy to the same name, overwriting the metadata only
	return o.fs.copy(ctx, o, o, info)
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// openFile represents an Object open for reading
type openFile struct {
	o     *Object        // Object we are reading for
	resp  *http.Response // response of the GET
	body  io.Reader      // reading from here
	hash  gohash.Hash    // currently accumulating SHA1
	bytes int64          // number of bytes read on this connection
	eof   bool           // whether we have read end of file
}

// newOpenFile wraps an io.ReadCloser and checks the sha1sum
func newOpenFile(o *Object, resp *http.Response) *openFile {
	file := &openFile{
		o:    o,
		resp: resp,
		hash: sha1.New(),
	}
	file.body = io.TeeReader(resp.Body, file.hash)
	return file
}

// Read bytes from the object - see io.Reader
func (file *openFile) Read(p []byte) (n int, err error) {
	n, err = file.body.Read(p)
	file.bytes += int64(n)
	if err == io.EOF {
		file.eof = true
	}
	return
}

// Close closes the object and checks the length and SHA1 if all of the
// object was read
func (file *openFile) Close() (err error) {
	// Close the body at the end
	defer fs.CheckClose(file.resp.Body, &err)

	// If not end of file then can't check SHA1
	if !file.eof {
		return nil
	}

	// Check that we read the correct number of bytes
	if file.o.Size() != file.bytes {
		return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
	}

	// Check the SHA1
	receivedSHA1 := file.o.sha1
	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
		return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
	}
	return nil
}

// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}

func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
	opts := rest.Opts{
		Method:     method,
		Options:    options,
		NoResponse: method == "HEAD",
	}

	// Use the downloadUrl from backblaze if the custom --b2-download-url
	// is not set, otherwise use the custom downloadUrl
	if o.fs.opt.DownloadURL == "" {
		opts.RootURL = o.fs.info.DownloadURL
	} else {
		opts.RootURL = o.fs.opt.DownloadURL
	}

	// Download by id if set and not using DownloadURL, otherwise by name
	if o.id != "" && o.fs.opt.DownloadURL == "" {
		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
	} else {
		bucket, bucketPath := o.split()
		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		// 404 for files, 400 for directories
		if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
			return nil, nil, fs.ErrorObjectNotFound
		}
		return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
	}

	// NB resp may be Open here - don't return err != nil without closing
	// Convert the Headers into an api.File
	var uploadTimestamp api.Timestamp
	err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
	if err != nil {
		fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
	}
	var Info = make(map[string]string)
	for k, vs := range resp.Header {
		k = strings.ToLower(k)
		for _, v := range vs {
			if strings.HasPrefix(k, headerPrefix) {
				Info[k[len(headerPrefix):]] = v
			}
		}
	}
	info = &api.File{
		ID:              resp.Header.Get(idHeader),
		Name:            resp.Header.Get(nameHeader),
		Action:          "upload",
		Size:            resp.ContentLength,
		UploadTimestamp: uploadTimestamp,
		SHA1:            resp.Header.Get(sha1Header),
		ContentType:     resp.Header.Get("Content-Type"),
		Info:            Info,
	}

	// When reading files from B2 via cloudflare using
	// --b2-download-url cloudflare strips the Content-Length
	// headers (presumably so it can inject stuff) so use the old
	// length read from the listing.
	if info.Size < 0 {
		info.Size = o.size
	}
	return resp, info, nil
}
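
// For example, a response with headers like these (values illustrative,
// abbreviated) is converted into an api.File:
//
//	X-Bz-File-Id: 4_z27c88f1d182b150646ff0b16_f1004ba650fe24e6b_...
//	X-Bz-Upload-Timestamp: 1000000000000
//	X-Bz-Content-Sha1: da39a3ee5e6b4b0d3255bfef95601890afd80709
//	X-Bz-Info-src_last_modified_millis: 1000000000000
//
// ID, UploadTimestamp and SHA1 come from the headers above, and
// Info["src_last_modified_millis"] from the x-bz-info- prefixed header.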

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	resp, info, err := o.getOrHead(ctx, "GET", options)
	if err != nil {
		return nil, err
	}

	// Don't check length or hash or metadata on partial content
	if resp.StatusCode == http.StatusPartialContent {
		return resp.Body, nil
	}

	err = o.decodeMetaData(info)
	if err != nil {
		_ = resp.Body.Close()
		return nil, err
	}
	return newOpenFile(o, resp), nil
}
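
// Note that ranged (partial content) reads return resp.Body directly, so
// the length and SHA-1 checks in openFile.Close only run when the whole
// object has been downloaded.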

// dontEncode is the characters that do not need percent-encoding
//
// The characters that do not need percent-encoding are a subset of
// the printable ASCII characters: upper-case letters, lower-case
// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 string
// must be replaced with "%" and the two-digit hex value of the byte.
const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
	`0123456789` +
	`._-/~!$'()*;=:@`)

// noNeedToEncode is a bitmap of characters which don't need % encoding
var noNeedToEncode [256]bool

func init() {
	for _, c := range dontEncode {
		noNeedToEncode[c] = true
	}
}

// urlEncode encodes in with % encoding
func urlEncode(in string) string {
	var out bytes.Buffer
	for i := 0; i < len(in); i++ {
		c := in[i]
		if noNeedToEncode[c] {
			_ = out.WriteByte(c)
		} else {
			// zero-pad so bytes < 0x10 encode as two hex digits
			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c))
		}
	}
	return out.String()
}
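
// For example:
//
//	urlEncode("dir/file name.txt") // "dir/file%20name.txt"
//
// '/' and '.' are in dontEncode so pass through unchanged, while space
// (0x20) is percent-encoded.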

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	if o.fs.opt.Versions {
		return errNotWithVersions
	}
	if o.fs.opt.VersionAt.IsSet() {
		return errNotWithVersionAt
	}
	size := src.Size()

	bucket, bucketPath := o.split()
	err = o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}
	if size < 0 {
		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
		rw := o.fs.getRW(false)

		n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
		if err == nil {
			bufReader := bufio.NewReader(in)
			in = bufReader
			_, err = bufReader.Peek(1)
		}

		if err == nil {
			fs.Debugf(o, "File is big enough for chunked streaming")
			up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
			if err != nil {
				o.fs.putRW(rw)
				return err
			}
			// NB Stream returns the buffer and token
			err = up.Stream(ctx, rw)
			if err != nil {
				return err
			}
			return o.decodeMetaDataFileInfo(up.info)
		} else if err == io.EOF {
			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
			defer o.fs.putRW(rw)
			size = n
			in = rw
		} else {
			o.fs.putRW(rw)
			return err
		}
	} else if size > int64(o.fs.opt.UploadCutoff) {
		chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
			Open:        o.fs,
			OpenOptions: options,
		})
		if err != nil {
			return err
		}
		up := chunkWriter.(*largeUpload)
		return o.decodeMetaDataFileInfo(up.info)
	}

	modTime := src.ModTime(ctx)

	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
	if calculatedSha1 == "" {
		calculatedSha1 = "hex_digits_at_end"
		har := newHashAppendingReader(in, sha1.New())
		size += int64(har.AdditionalLength())
		in = har
	}

	// Get upload URL
	upload, err := o.fs.getUploadURL(ctx, bucket)
	if err != nil {
		return err
	}
	defer func() {
		// return it like this because we might nil it out
		o.fs.returnUploadURL(upload)
	}()

	// Headers for upload file
	//
	// Authorization
	// required
	// An upload authorization token, from b2_get_upload_url.
	//
	// X-Bz-File-Name
	// required
	//
	// The name of the file, in percent-encoded UTF-8. See Files for requirements on file names. See String Encoding.
	//
	// Content-Type
	// required
	//
	// The MIME type of the content of the file, which will be returned in
	// the Content-Type header when downloading the file. Use the
	// Content-Type b2/x-auto to automatically set the stored Content-Type
	// post upload. In the case where a file extension is absent or the
	// lookup fails, the Content-Type is set to application/octet-stream. The
	// Content-Type mappings can be pursued here.
	//
	// X-Bz-Content-Sha1
	// required
	//
	// The SHA1 checksum of the content of the file. B2 will check this when
	// the file is uploaded, to make sure that the file arrived correctly. It
	// will be returned in the X-Bz-Content-Sha1 header when the file is
	// downloaded.
	//
	// X-Bz-Info-src_last_modified_millis
	// optional
	//
	// If the original source of the file being uploaded has a last modified
	// time concept, Backblaze recommends using this spelling of one of your
	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
	// different B2 clients and the B2 web user interface to interoperate
	// correctly. The value should be a base 10 number which represents a UTC
	// time when the original source file was last modified. It is a base 10
	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
	// in a 64 bit integer such as the type "long" in the programming
	// language Java. It is intended to be compatible with Java's time
	// long. For example, it can be passed directly into the Java call
	// Date.setTime(long time).
	//
	// X-Bz-Info-*
	// optional
	//
	// Up to 10 of these headers may be present. The * part of the header
	// name is replaced with the name of a custom field in the file
	// information stored with the file, and the value is an arbitrary UTF-8
	// string, percent-encoded. The same info headers sent with the upload
	// will be returned with the download.
	opts := rest.Opts{
		Method:  "POST",
		RootURL: upload.UploadURL,
		Body:    in,
		Options: options,
		ExtraHeaders: map[string]string{
			"Authorization":  upload.AuthorizationToken,
			"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
			"Content-Type":   fs.MimeType(ctx, src),
			sha1Header:       calculatedSha1,
			timeHeader:       timeString(modTime),
		},
		ContentLength: &size,
	}
	var response api.FileInfo
	// Don't retry, return a retry error instead
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &response)
		retry, err := o.fs.shouldRetry(ctx, resp, err)
		// On retryable error clear UploadURL
		if retry {
			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
			upload = nil
		}
		return retry, err
	})
	if err != nil {
		return err
	}
	return o.decodeMetaDataFileInfo(&response)
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	// FIXME what if file is smaller than 1 chunk?
	if f.opt.Versions {
		return info, nil, errNotWithVersions
	}
	if f.opt.VersionAt.IsSet() {
		return info, nil, errNotWithVersionAt
	}
	//size := src.Size()

	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}
	bucket, _ := o.split()
	err = f.makeBucket(ctx, bucket)
	if err != nil {
		return info, nil, err
	}

	info = fs.ChunkWriterInfo{
		ChunkSize:   int64(f.opt.ChunkSize),
		Concurrency: o.fs.opt.UploadConcurrency,
		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
	}
	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
	return info, up, err
}
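
// OpenChunkWriter is the interface the generic multipart helper uses -
// the multipart.UploadMultipart call in Update above drives it via the
// Open: o.fs option.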

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	bucket, bucketPath := o.split()
	if o.fs.opt.Versions {
		return errNotWithVersions
	}
	if o.fs.opt.VersionAt.IsSet() {
		return errNotWithVersionAt
	}
	if o.fs.opt.HardDelete {
		return o.fs.deleteByID(ctx, o.id, bucketPath)
	}
	return o.fs.hide(ctx, bucket, bucketPath)
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}

var lifecycleHelp = fs.CommandHelp{
	Name:  "lifecycle",
	Short: "Read or set the lifecycle for a bucket",
	Long: `This command can be used to read or set the lifecycle for a bucket.

Usage Examples:

To show the current lifecycle rules:

    rclone backend lifecycle b2:bucket

This will dump something like this showing the lifecycle rules.

    [
        {
            "daysFromHidingToDeleting": 1,
            "daysFromUploadingToHiding": null,
            "fileNamePrefix": ""
        }
    ]

If there are no lifecycle rules (the default) then it will just return [].

To set the current lifecycle rules:

    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
    rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1

This will run and then print the new lifecycle rules as above.

Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".

You can't disable versioning with B2. The best you can do is to set
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made.

    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1

See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
	Opts: map[string]string{
		"daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
	},
}

func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	var newRule api.LifecycleRule
	if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
		days, err := strconv.Atoi(daysStr)
		if err != nil {
			return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
		}
		newRule.DaysFromHidingToDeleting = &days
	}
	if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
		days, err := strconv.Atoi(daysStr)
		if err != nil {
			return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
		}
		newRule.DaysFromUploadingToHiding = &days
	}
	bucketName, _ := f.split("")
	if bucketName == "" {
		return nil, errors.New("bucket required")
	}

	var bucket *api.Bucket
	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
		bucketID, err := f.getBucketID(ctx, bucketName)
		if err != nil {
			return nil, err
		}
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_update_bucket",
		}
		var request = api.UpdateBucketRequest{
			ID:             bucketID,
			AccountID:      f.info.AccountID,
			LifecycleRules: []api.LifecycleRule{newRule},
		}
		var response api.Bucket
		err = f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return nil, err
		}
		bucket = &response
	} else {
		err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
			bucket = b
			return nil
		})
		if err != nil {
			return nil, err
		}
	}
	if bucket == nil {
		return nil, fs.ErrorDirNotFound
	}
	return bucket.LifecycleRules, nil
}

var cleanupHelp = fs.CommandHelp{
	Name:  "cleanup",
	Short: "Remove unfinished large file uploads.",
	Long: `This command removes unfinished large file uploads of age greater than
max-age, which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

    rclone backend cleanup b2:bucket/path/to/object
    rclone backend cleanup -o max-age=7w b2:bucket/path/to/object

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
	Opts: map[string]string{
		"max-age": "Max age of upload to delete",
	},
}

func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	maxAge := defaultMaxAge
	if opt["max-age"] != "" {
		maxAge, err = fs.ParseDuration(opt["max-age"])
		if err != nil {
			return nil, fmt.Errorf("bad max-age: %w", err)
		}
	}
	return nil, f.cleanUp(ctx, false, true, maxAge)
}

var cleanupHiddenHelp = fs.CommandHelp{
	Name:  "cleanup-hidden",
	Short: "Remove old versions of files.",
	Long: `This command removes any old hidden versions of files.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

    rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
}

func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	return nil, f.cleanUp(ctx, true, false, 0)
}

var commandHelp = []fs.CommandHelp{
	lifecycleHelp,
	cleanupHelp,
	cleanupHiddenHelp,
}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "lifecycle":
		return f.lifecycleCommand(ctx, name, arg, opt)
	case "cleanup":
		return f.cleanupCommand(ctx, name, arg, opt)
	case "cleanup-hidden":
		return f.cleanupHiddenCommand(ctx, name, arg, opt)
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = &Fs{}
	_ fs.Purger          = &Fs{}
	_ fs.Copier          = &Fs{}
	_ fs.PutStreamer     = &Fs{}
	_ fs.CleanUpper      = &Fs{}
	_ fs.ListRer         = &Fs{}
	_ fs.PublicLinker    = &Fs{}
	_ fs.OpenChunkWriter = &Fs{}
	_ fs.Commander       = &Fs{}
	_ fs.Object          = &Object{}
	_ fs.MimeTyper       = &Object{}
	_ fs.IDer            = &Object{}
)