//go:build !plan9 && !js
// +build !plan9,!js

// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
	qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
	qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "qingstor",
		Description: "QingCloud Object Storage",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:    "env_auth",
			Help:    "Get QingStor credentials from runtime.\n\nOnly applies if access_key_id and secret_access_key are blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter QingStor credentials in the next step.",
			}, {
				Value: "true",
				Help:  "Get QingStor credentials from the environment (env vars or IAM).",
			}},
		}, {
			Name: "access_key_id",
			Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "secret_access_key",
			Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "endpoint",
			Help: "Enter an endpoint URL to connect to the QingStor API.\n\nLeave blank to use the default value \"https://qingstor.com:443\".",
		}, {
			Name: "zone",
			Help: "Zone to connect to.\n\nDefault is \"pek3a\".",
			Examples: []fs.OptionExample{{
				Value: "pek3a",
				Help:  "The Beijing (China) Three Zone.\nNeeds location constraint pek3a.",
			}, {
				Value: "sh1a",
				Help:  "The Shanghai (China) First Zone.\nNeeds location constraint sh1a.",
			}, {
				Value: "gd2a",
				Help:  "The Guangdong (China) Second Zone.\nNeeds location constraint gd2a.",
			}},
		}, {
			Name:     "connection_retries",
			Help:     "Number of connection retries.",
			Default:  3,
			Advanced: true,
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload.

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.`,
			Default:  minChunkSize,
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
			Default:  1,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.EncodeInvalidUtf8 |
				encoder.EncodeCtl |
				encoder.EncodeSlash),
		}},
	})
}
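
// A remote using the options above might be configured like this in
// rclone.conf (illustrative values, not defaults):
//
//	[myqingstor]
//	type = qingstor
//	access_key_id = AKIDEXAMPLE
//	secret_access_key = SECRETEXAMPLE
//	zone = pek3a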

// Constants
const (
	listLimitSize       = 1000                   // Number of items to read at once
	maxSizeForCopy      = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	minChunkSize        = fs.SizeSuffix(minMultiPartSize)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Globals
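
// timestampToTime converts a Unix timestamp in seconds into a UTC
// time.Time. The RFC3339Nano format/parse round trip normalises the
// value; for whole-second inputs it is effectively time.Unix(tp, 0).UTC().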
func timestampToTime(tp int64) time.Time {
	timeLayout := time.RFC3339Nano
	ts := time.Unix(tp, 0).Format(timeLayout)
	tm, _ := time.Parse(timeLayout, ts)
	return tm.UTC()
}

// Options defines the configuration for this backend
type Options struct {
	EnvAuth           bool                 `config:"env_auth"`
	AccessKeyID       string               `config:"access_key_id"`
	SecretAccessKey   string               `config:"secret_access_key"`
	Endpoint          string               `config:"endpoint"`
	Zone              string               `config:"zone"`
	ConnectionRetries int                  `config:"connection_retries"`
	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
	UploadConcurrency int                  `config:"upload_concurrency"`
	Enc               encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote qingstor server
type Fs struct {
	name          string        // The name of the remote
	root          string        // the path we are working on, if any
	opt           Options       // parsed options
	features      *fs.Features  // optional features
	svc           *qs.Service   // The connection to the qingstor server
	zone          string        // The zone we are working on
	rootBucket    string        // bucket part of root (if any)
	rootDirectory string        // directory part of root (if any)
	cache         *bucket.Cache // cache for bucket creation status
}

// Object describes a qingstor object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta & mimeType - to fill
	// that in you need to call readMetaData
	fs           *Fs       // what this object is part of
	remote       string    // the remote path of the object
	etag         string    // md5sum of the object
	size         int64     // length of the object content
	mimeType     string    // ContentType of object - may be ""
	lastModified time.Time // Last modified
	encrypted    bool      // whether the object is encrypted
	algo         string    // custom encryption algorithm
}

// ------------------------------------------------------------

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
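//
// For example, with f.root = "mybucket/dir", split("file.txt") would
// return ("mybucket", "dir/file.txt"), with the configured encoding
// applied to both parts.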
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}

// Split a URL into three parts: protocol, host and port
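//
// If the endpoint does not match the expected pattern, the submatch
// indexing in the function panics; the deferred recover converts that
// panic into an error return instead of crashing.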
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
	/*
		Pattern to match an endpoint,
		e.g.: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", "443"
		      "http(s)://qingstor.com"     --> "http(s)", "qingstor.com", ""
		      "qingstor.com"               --> "", "qingstor.com", ""
	*/
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case error:
				err = x
			default:
				err = nil
			}
		}
	}()
	var matcher = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
	parts := matcher.FindStringSubmatch(endpoint)
	protocol, host, port = parts[1], parts[2], parts[3]
	return
}

// qsConnection makes a connection to qingstor
func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
	accessKeyID := opt.AccessKeyID
	secretAccessKey := opt.SecretAccessKey

	switch {
	case opt.EnvAuth:
		// No need for empty checks if "env_auth" is true
	case accessKeyID == "" && secretAccessKey == "":
		// if no access key/secret is supplied then fall back to anonymous access
	case accessKeyID == "":
		return nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, errors.New("secret_access_key not found")
	}

	protocol := "https"
	host := "qingstor.com"
	port := 443

	endpoint := opt.Endpoint
	if endpoint != "" {
		_protocol, _host, _port, err := qsParseEndpoint(endpoint)
		if err != nil {
			return nil, fmt.Errorf("the endpoint %q has an invalid format", endpoint)
		}

		if _protocol != "" {
			protocol = _protocol
		}
		host = _host
		if _port != "" {
			port, _ = strconv.Atoi(_port)
		} else if protocol == "http" {
			port = 80
		}
	}

	cf, err := qsConfig.NewDefault()
	if err != nil {
		return nil, err
	}
	cf.AccessKeyID = accessKeyID
	cf.SecretAccessKey = secretAccessKey
	cf.Protocol = protocol
	cf.Host = host
	cf.Port = port
	// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
	cf.Connection = fshttp.NewClient(ctx)

	return qs.Init(cf)
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
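//
// If the path turns out to name an object rather than a directory, the
// returned Fs is rooted at the parent directory and the returned error
// is fs.ErrorIsFile.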
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("qingstor: chunk size: %w", err)
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, fmt.Errorf("qingstor: upload cutoff: %w", err)
	}
	svc, err := qsServiceConnection(ctx, opt)
	if err != nil {
		return nil, err
	}

	if opt.Zone == "" {
		opt.Zone = "pek3a"
	}

	f := &Fs{
		name:  name,
		opt:   *opt,
		svc:   svc,
		zone:  opt.Zone,
		cache: bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SlowModTime:       true,
	}).Fill(ctx, f)

	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone)
		if err != nil {
			return nil, err
		}
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		_, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "QingStor root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	//return time.Nanosecond
	// Not supported temporarily
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
	//return hash.HashSet(hash.HashNone)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Put creates a new object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	fsObj := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fsObj, fsObj.Update(ctx, in, src, options...)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	err := f.makeBucket(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()
	source := path.Join("/", srcBucket, srcPath)

	// fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
	req := qs.PutObjectInput{
		XQSCopySource: &source,
	}
	bucketInit, err := f.svc.Bucket(dstBucket, f.zone)
	if err != nil {
		return nil, err
	}
	_, err = bucketInit.PutObject(dstPath, &req)
	if err != nil {
		// fs.Debugf(f, "Copy Failed, API Error: %v", err)
		return nil, err
	}
	return f.NewObject(ctx, remote)
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info
		if info.Size != nil {
			o.size = *info.Size
		}

		if info.Etag != nil {
			o.etag = qs.StringValue(info.Etag)
		}
		if info.Modified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = timestampToTime(int64(*info.Modified))
		}

		if info.MimeType != nil {
			o.mimeType = qs.StringValue(info.MimeType)
		}

		if info.Encrypted != nil {
			o.encrypted = qs.BoolValue(info.Encrypted)
		}
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
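//
// Listing is paginated: each iteration requests up to listLimitSize keys
// and resumes from resp.NextMarker until HasMore reports false.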
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	maxLimit := int(listLimitSize)
	var marker *string
	for {
		bucketInit, err := f.svc.Bucket(bucket, f.zone)
		if err != nil {
			return err
		}
		req := qs.ListObjectsInput{
			Delimiter: &delimiter,
			Prefix:    &directory,
			Limit:     &maxLimit,
			Marker:    marker,
		}
		resp, err := bucketInit.ListObjects(&req)
		if err != nil {
			if e, ok := err.(*qsErr.QingStorError); ok {
				if e.StatusCode == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			for _, commonPrefix := range resp.CommonPrefixes {
				if commonPrefix == nil {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := *commonPrefix
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				remote = strings.TrimSuffix(remote, "/")
				err = fn(remote, &qs.KeyType{Key: &remote}, true)
				if err != nil {
					return err
				}
			}
		}

		for _, object := range resp.Keys {
			remote := qs.StringValue(object.Key)
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", remote)
				continue
			}
			remote = remote[len(prefix):]
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if resp.HasMore != nil && !*resp.HasMore {
			break
		}
		// Use NextMarker if set, otherwise use last Key
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			fs.Errorf(f, "Expecting NextMarker but didn't find one")
			break
		} else {
			marker = resp.NextMarker
		}
	}
	return nil
}

// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		size := int64(0)
		if object.Size != nil {
			size = *object.Size
		}
		d := fs.NewDir(remote, time.Time{}).SetSize(size)
		return d, nil
	}
	o, err := f.newObjectWithInfo(remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// List the objects and directories
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}

// listBuckets lists the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	req := qs.ListBucketsInput{
		Location: &f.zone,
	}
	resp, err := f.svc.ListBuckets(&req)
	if err != nil {
		return nil, err
	}

	for _, bucket := range resp.Buckets {
		d := fs.NewDir(f.opt.Enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
		entries = append(entries, d)
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
			entry, err := f.itemToDirEntry(remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
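//
// QingStor takes roughly a minute to settle after a bucket is deleted,
// so this polls (once a second, up to 120 tries) for a just-deleted
// bucket to finish deleting before attempting to recreate it.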
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	return f.cache.Create(bucket, func() error {
		bucketInit, err := f.svc.Bucket(bucket, f.zone)
		if err != nil {
			return err
		}
		/* When a bucket is deleted, QingStor needs about 60 seconds to sync
		   its status, so we have to wait for the sync to finish before
		   operating on a just-deleted bucket.
		*/
		wasDeleted := false
		retries := 0
		for retries <= 120 {
			statistics, err := bucketInit.GetStatistics()
			if statistics == nil || err != nil {
				break
			}
			switch *statistics.Status {
			case "deleted":
				fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries)
				time.Sleep(time.Second * 1)
				retries++
				wasDeleted = true
				continue
			}
			break
		}

		retries = 0
		for retries <= 120 {
			_, err = bucketInit.Put()
			if e, ok := err.(*qsErr.QingStorError); ok {
				if e.StatusCode == http.StatusConflict {
					if wasDeleted {
						fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries)
						time.Sleep(time.Second * 1)
						retries++
						continue
					}
					err = nil
				}
			}
			break
		}
		return err
	}, nil)
}

// bucketIsEmpty checks if the bucket is empty
func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
	bucketInit, err := f.svc.Bucket(bucket, f.zone)
	if err != nil {
		return true, err
	}

	statistics, err := bucketInit.GetStatistics()
	if err != nil {
		return true, err
	}

	if *statistics.Count == 0 {
		return true, nil
	}
	return false, nil
}

// Rmdir deletes a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
		return nil
	}
	isEmpty, err := f.bucketIsEmpty(bucket)
	if err != nil {
		return err
	}
	if !isEmpty {
		// fs.Debugf(f, "The bucket %s you tried to delete is not empty.", bucket)
		return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
	}
	return f.cache.Remove(bucket, func() error {
		// fs.Debugf(f, "Deleting the bucket %s", bucket)
		bucketInit, err := f.svc.Bucket(bucket, f.zone)
		if err != nil {
			return err
		}
		retries := 0
		for retries <= 10 {
			_, delErr := bucketInit.Delete()
			if delErr != nil {
				if e, ok := delErr.(*qsErr.QingStorError); ok {
					switch e.Code {
					// The "lease" status takes a few seconds to become "ready" when
					// creating a new bucket, so wait for the lease status to be ready
					case "lease_not_ready":
						fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
						retries++
						time.Sleep(time.Second * 1)
						continue
					default:
						err = e
					}
				}
			} else {
				err = delErr
			}
			break
		}
		return err
	})
}

// cleanUpBucket removes all pending multipart uploads for a given bucket
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
	bucketInit, err := f.svc.Bucket(bucket, f.zone)
	if err != nil {
		return err
	}
	// maxLimit := int(listLimitSize)
	var marker *string
	for {
		req := qs.ListMultipartUploadsInput{
			// The default is 200 but this errors if more than 200 is put in so leave at the default
			// Limit: &maxLimit,
			KeyMarker: marker,
		}
		var resp *qs.ListMultipartUploadsOutput
		resp, err = bucketInit.ListMultipartUploads(&req)
		if err != nil {
			return fmt.Errorf("clean up bucket list multipart uploads: %w", err)
		}
		for _, upload := range resp.Uploads {
			if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
				age := time.Since(*upload.Created)
				if age > 24*time.Hour {
					fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
					req := qs.AbortMultipartUploadInput{
						UploadID: upload.UploadID,
					}
					_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
					if abortErr != nil {
						err = fmt.Errorf("failed to remove multipart upload for %q: %w", *upload.Key, abortErr)
						fs.Errorf(f, "%v", err)
					}
				} else {
					fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
				}
			}
		}
		if resp.HasMore != nil && !*resp.HasMore {
			break
		}
		// Use NextKeyMarker if set, otherwise use last Key
		if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
			fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
			break
		} else {
			marker = resp.NextKeyMarker
		}
	}
	return err
}

// CleanUp removes all pending multipart uploads
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	if f.rootBucket != "" {
		return f.cleanUpBucket(ctx, f.rootBucket)
	}
	entries, err := f.listBuckets(ctx)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
		if cleanErr != nil {
			fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
			err = cleanErr
		}
	}
	return err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	bucket, bucketPath := o.split()
	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return err
	}
	// fs.Debugf(o, "Read metadata of key: %s", key)
	resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{})
	if err != nil {
		// fs.Debugf(o, "Read metadata failed, API Error: %v", err)
		if e, ok := err.(*qsErr.QingStorError); ok {
			if e.StatusCode == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	// Ignore missing Content-Length assuming it is 0
	if resp.ContentLength != nil {
		o.size = *resp.ContentLength
	}

	if resp.ETag != nil {
		o.etag = qs.StringValue(resp.ETag)
	}

	if resp.LastModified == nil {
		fs.Logf(o, "Failed to read last modified from HEAD")
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}

	if resp.ContentType != nil {
		o.mimeType = qs.StringValue(resp.ContentType)
	}

	if resp.XQSEncryptionCustomerAlgorithm != nil {
		o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
		o.encrypted = true
	}

	return nil
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata, %v", err)
		return time.Now()
	}
	modTime := o.lastModified
	return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	o.lastModified = modTime
	mimeType := fs.MimeType(ctx, o)

	if o.size >= maxSizeForCopy {
		fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
		return nil
	}
	// Copy the object to itself to update the metadata
	bucket, bucketPath := o.split()
	sourceKey := path.Join("/", bucket, bucketPath)

	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return err
	}

	req := qs.PutObjectInput{
		XQSCopySource: &sourceKey,
		ContentType:   &mimeType,
	}
	_, err = bucketInit.PutObject(bucketPath, &req)

	return err
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
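//
// Range and seek requests are honoured via fs.RangeOption and
// fs.SeekOption; for example (a sketch, not from this file):
//
//	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//
// would read the first KiB of the object.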
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	bucket, bucketPath := o.split()
	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return nil, err
	}

	req := qs.GetObjectInput{}
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	resp, err := bucketInit.GetObject(bucketPath, &req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}

// Update uploads the contents of in to the object
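//
// Uploads of unknown size, or of at least upload_cutoff bytes, are sent
// as multipart uploads in chunk_size parts; smaller uploads are sent in
// a single request.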
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The maximum size of an uploaded object is multipartUploadSize * MaxMultipleParts
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}

	// Guess the content type
	mimeType := fs.MimeType(ctx, src)

	req := uploadInput{
		body:        in,
		qsSvc:       o.fs.svc,
		bucket:      bucket,
		zone:        o.fs.zone,
		key:         bucketPath,
		mimeType:    mimeType,
		partSize:    int64(o.fs.opt.ChunkSize),
		concurrency: o.fs.opt.UploadConcurrency,
	}
	uploader := newUploader(&req)

	size := src.Size()
	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	if multipart {
		err = uploader.upload()
	} else {
		err = uploader.singlePartUpload(in, size)
	}
	if err != nil {
		return err
	}
	// Read the metadata of the object
	err = o.readMetaData()
	return err
}

// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	bucket, bucketPath := o.split()
	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return err
	}
	_, err = bucketInit.DeleteObject(bucketPath)
	return err
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	etag := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(etag) {
		fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
		return "", nil
	}
	return etag, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// String returns a description of the Object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs         = &Fs{}
	_ fs.CleanUpper = &Fs{}
	_ fs.Copier     = &Fs{}
	_ fs.Object     = &Object{}
	_ fs.ListRer    = &Fs{}
	_ fs.MimeTyper  = &Object{}
)