2023-10-20 19:35:56 +08:00
|
|
|
// Package linkbox provides an interface to the linkbox.to Cloud storage system.
|
2023-11-17 17:28:05 +08:00
|
|
|
//
|
|
|
|
// API docs: https://www.linkbox.to/api-docs
|
2023-10-20 19:35:56 +08:00
|
|
|
package linkbox
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
/*
|
|
|
|
Extras
|
|
|
|
- PublicLink - NO - sharing doesn't share the actual file, only a page with it on
|
|
|
|
- Move - YES - have Move and Rename file APIs so is possible
|
|
|
|
- MoveDir - NO - probably not possible - have Move but no Rename
|
|
|
|
*/
|
|
|
|
|
2023-10-20 19:35:56 +08:00
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"crypto/md5"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"path"
|
2023-11-17 17:28:05 +08:00
|
|
|
"regexp"
|
2023-10-20 19:35:56 +08:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/config/configmap"
|
|
|
|
"github.com/rclone/rclone/fs/config/configstruct"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/fs/fshttp"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
2023-11-17 17:28:05 +08:00
|
|
|
"github.com/rclone/rclone/lib/dircache"
|
2023-10-20 19:35:56 +08:00
|
|
|
"github.com/rclone/rclone/lib/pacer"
|
|
|
|
"github.com/rclone/rclone/lib/rest"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Tuning constants and API endpoints for the Linkbox backend.
const (
	maxEntitiesPerPage = 1000                   // largest page size accepted by the file_search API
	minSleep           = 200 * time.Millisecond // minimum pacer delay between API calls
	maxSleep           = 2 * time.Second        // maximum pacer backoff delay
	pacerBurst         = 1                      // number of calls allowed without pacing
	linkboxAPIURL      = "https://www.linkbox.to/api/open/" // base URL for all API calls
	rootID             = "0" // ID of root directory
)
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
fsi := &fs.RegInfo{
|
|
|
|
Name: "linkbox",
|
|
|
|
Description: "Linkbox",
|
|
|
|
NewFs: NewFs,
|
|
|
|
Options: []fs.Option{{
|
|
|
|
Name: "token",
|
|
|
|
Help: "Token from https://www.linkbox.to/admin/account",
|
|
|
|
Sensitive: true,
|
|
|
|
Required: true,
|
|
|
|
}},
|
|
|
|
}
|
|
|
|
fs.Register(fsi)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Options defines the configuration for this backend
type Options struct {
	Token string `config:"token"` // API token from https://www.linkbox.to/admin/account
}
|
|
|
|
|
|
|
|
// Fs stores the interface to the remote Linkbox files
type Fs struct {
	name     string             // name of this remote as passed to NewFs
	root     string             // root path within the remote, with slashes trimmed
	opt      Options            // options for this backend
	features *fs.Features       // optional features
	ci       *fs.ConfigInfo     // global config
	srv      *rest.Client       // the connection to the server
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *fs.Pacer          // rate limiter for API calls
}
|
|
|
|
|
|
|
|
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
	fs          *Fs       // parent Fs
	remote      string    // path of the object relative to the Fs root
	size        int64     // size in bytes
	modTime     time.Time // modification time (taken from the entity's ctime)
	contentType string    // raw "type" field reported by the API
	fullURL     string    // download URL; may be empty until looked up in Open
	dirID       int64     // ID of the directory containing this object
	itemID      string    // and these IDs are for files
	id          int64     // these IDs appear to apply to directories
	isDir       bool      // whether the entity represents a directory
}
|
|
|
|
|
|
|
|
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
//
// If root points to an existing file (rather than a directory) this
// returns an Fs rooted at the parent directory together with
// fs.ErrorIsFile, per the rclone backend convention.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	root = strings.Trim(root, "/")
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	ci := fs.GetConfig(ctx)

	f := &Fs{
		name:  name,
		opt:   *opt,
		root:  root,
		ci:    ci,
		srv:   rest.NewClient(fshttp.NewClient(ctx)),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep))),
	}
	f.dirCache = dircache.New(root, rootID, f)

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
		CaseInsensitive:         true,
	}).Fill(ctx, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		// NOTE: err is deliberately shadowed here (":=") so the outer err
		// from FindRoot is left untouched.
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(ctx, &tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
|
|
|
|
|
|
|
// entity describes a single file or directory returned by the Linkbox API.
type entity struct {
	Type   string `json:"type"`    // "dir"/"sdir" for directories, otherwise a file type
	Name   string `json:"name"`    // leaf name
	URL    string `json:"url"`     // download URL for files
	Ctime  int64  `json:"ctime"`   // creation time as a Unix timestamp (seconds)
	Size   int64  `json:"size"`    // size in bytes
	ID     int64  `json:"id"`      // numeric ID (used for directories)
	Pid    int64  `json:"pid"`     // parent directory ID
	ItemID string `json:"item_id"` // string ID (used for files)
}
|
2023-11-17 17:28:05 +08:00
|
|
|
|
|
|
|
// Return true if the entity is a directory
|
|
|
|
func (e *entity) isDir() bool {
|
|
|
|
return e.Type == "dir" || e.Type == "sdir"
|
|
|
|
}
|
|
|
|
|
2023-10-20 19:35:56 +08:00
|
|
|
// data wraps the list of entities returned in a file_search response.
type data struct {
	Entities []entity `json:"list"`
}
|
|
|
|
// fileSearchRes is the response to the file_search API call.
type fileSearchRes struct {
	response        // common status/message fields
	SearchData data `json:"data"`
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// Set an object info from an entity
//
// Copies all the metadata the API reports into the Object.
func (o *Object) set(e *entity) {
	o.modTime = time.Unix(e.Ctime, 0) // ctime is the only timestamp the API provides
	o.contentType = e.Type
	o.size = e.Size
	o.fullURL = e.URL
	o.isDir = e.isDir()
	o.id = e.ID
	o.itemID = e.ItemID
	o.dirID = e.Pid
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// Call linkbox with the query in opts and return result
//
// This will be checked for error and an error will be returned if Status != 1
//
// result must be a pointer to a struct embedding response (so it
// satisfies the responser interface) — the type assertion below will
// panic otherwise.
func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
	// Retry the call under the pacer for transient HTTP failures.
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	// Even with a 200 response the API signals failure via Status != 1,
	// in which case the response itself is returned as the error.
	responser := result.(responser)
	if responser.IsError() {
		return responser
	}
	return nil
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// listAllFn is the user function called by listAll for each entity found.
//
// Return true to stop the listing early (listAll then reports found=true).
type listAllFn func(*entity) bool
|
2023-10-20 19:35:56 +08:00
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// Search is a bit fussy about which characters match
|
|
|
|
//
|
|
|
|
// If the name doesn't match this then do an dir list instead
|
2024-01-07 00:47:55 +08:00
|
|
|
// N.B.: Linkbox doesn't support search by name that is longer than 50 chars
|
2024-01-10 22:02:20 +08:00
|
|
|
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ -.]{1,50}$`)
|
2023-10-20 19:35:56 +08:00
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// If you set name then search ignores dirID. name is a substring
// search also so name="dir" matches "sub dir" also. This filters it
// down so it only returns items in dirID
func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllFn) (found bool, err error) {
	var (
		pageNumber       = 0
		numberOfEntities = maxEntitiesPerPage
	)
	name = strings.TrimSpace(name) // search doesn't like spaces
	if !searchOK.MatchString(name) {
		// If name isn't good then do an unbounded search
		name = ""
	}

OUTER:
	// A short page (fewer than maxEntitiesPerPage entries) marks the end
	// of the listing.
	for numberOfEntities == maxEntitiesPerPage {
		pageNumber++
		opts := &rest.Opts{
			Method:  "GET",
			RootURL: linkboxAPIURL,
			Path:    "file_search",
			Parameters: url.Values{
				"token":    {f.opt.Token},
				"name":     {name},
				"pid":      {dirID},
				"pageNo":   {itoa(pageNumber)},
				"pageSize": {itoa64(maxEntitiesPerPage)},
			},
		}

		var responseResult fileSearchRes
		err = getUnmarshaledResponse(ctx, f, opts, &responseResult)
		if err != nil {
			return false, fmt.Errorf("getting files failed: %w", err)
		}

		numberOfEntities = len(responseResult.SearchData.Entities)

		for _, entity := range responseResult.SearchData.Entities {
			if itoa64(entity.Pid) != dirID {
				// when name != "" this returns from all directories, so ignore not this one
				continue
			}
			// &entity is only used synchronously inside fn, so taking the
			// address of the loop variable is safe here.
			if fn(&entity) {
				found = true
				break OUTER
			}
		}
		// Safety cap to avoid looping forever on a misbehaving API.
		if pageNumber > 100000 {
			return false, fmt.Errorf("too many results")
		}
	}
	return found, nil
}
|
2023-10-20 19:35:56 +08:00
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// Turn 64 bit int to string
func itoa64(i int64) string {
	return strconv.FormatInt(i, 10)
}
|
|
|
|
|
|
|
|
// Turn int to string
func itoa(i int) string {
	return strconv.FormatInt(int64(i), 10)
}
|
|
|
|
|
|
|
|
// splitDirAndName splits remote at the final "/" into the directory
// part and the leaf name. A remote with no "/" is all name and no dir;
// a trailing "/" yields an empty name.
func splitDirAndName(remote string) (dir string, name string) {
	idx := strings.LastIndex(remote, "/")
	if idx < 0 {
		return "", remote
	}
	return remote[:idx], remote[idx+1:]
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// FindLeaf finds a directory of name leaf in the folder with ID directoryID
|
|
|
|
func (f *Fs) FindLeaf(ctx context.Context, directoryID, leaf string) (directoryIDOut string, found bool, err error) {
|
|
|
|
// Find the leaf in directoryID
|
|
|
|
found, err = f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
|
|
|
|
if entity.isDir() && strings.EqualFold(entity.Name, leaf) {
|
|
|
|
directoryIDOut = itoa64(entity.ID)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
})
|
|
|
|
return directoryIDOut, found, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returned from "folder_create"
type folderCreateRes struct {
	response // common status/message fields
	Data     struct {
		DirID int64 `json:"dirId"` // ID of the newly created directory
	} `json:"data"`
}
|
|
|
|
|
|
|
|
// CreateDir makes a directory with dirID as parent and name leaf
//
// Returns the ID of the new directory as a string.
func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
	opts := &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "folder_create",
		Parameters: url.Values{
			"token":       {f.opt.Token},
			"name":        {leaf},
			"pid":         {dirID},
			"isShare":     {"0"},
			"canInvite":   {"1"},
			"canShare":    {"1"},
			"withBodyImg": {"1"},
			"desc":        {""},
		},
	}

	response := folderCreateRes{}
	err = getUnmarshaledResponse(ctx, f, opts, &response)
	if err != nil {
		// response status 1501 means that directory already exists
		if response.Status == 1501 {
			return newID, fmt.Errorf("couldn't find already created directory: %w", fs.ErrorDirNotFound)
		}
		return newID, fmt.Errorf("CreateDir failed: %w", err)
	}
	// A zero ID would poison the dircache, so treat it as an error.
	if response.Data.DirID == 0 {
		return newID, fmt.Errorf("API returned 0 for ID of newly created directory")
	}
	return itoa64(response.Data.DirID), nil
}
|
|
|
|
|
2023-10-20 19:35:56 +08:00
|
|
|
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// fs.Debugf(f, "List method dir = {%s}", dir)
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	// Empty name means an unfiltered listing of directoryID.
	_, err = f.listAll(ctx, directoryID, "", func(entity *entity) bool {
		remote := path.Join(dir, entity.Name)
		if entity.isDir() {
			id := itoa64(entity.ID)
			modTime := time.Unix(entity.Ctime, 0)
			d := fs.NewDir(remote, modTime).SetID(id).SetParentID(itoa64(entity.Pid))
			entries = append(entries, d)
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, id)
		} else {
			o := &Object{
				fs:     f,
				remote: remote,
			}
			o.set(entity)
			entries = append(entries, o)
		}
		// Always continue — we want the whole directory.
		return false
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// get an entity with leaf from dirID
//
// Returns fs.ErrorObjectNotFound if no entry matches, and fs.ErrorIsDir
// if the matching entry is a directory.
//
// NOTE(review): the token parameter is never used in the body — listAll
// reads f.opt.Token itself. It is kept only for call-site compatibility.
func getEntity(ctx context.Context, f *Fs, leaf string, directoryID string, token string) (*entity, error) {
	var result *entity
	var resultErr = fs.ErrorObjectNotFound
	_, err := f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
		if strings.EqualFold(entity.Name, leaf) {
			// fs.Debugf(f, "getObject found entity.Name {%s} name {%s}", entity.Name, name)
			if entity.isDir() {
				result = nil
				resultErr = fs.ErrorIsDir
			} else {
				result = entity
				resultErr = nil
			}
			// Stop listing — we found the name we were looking for.
			return true
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	return result, resultErr
}
|
|
|
|
|
|
|
|
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		// A missing parent directory means the object can't exist.
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	// getEntity returns ErrorObjectNotFound / ErrorIsDir as appropriate.
	entity, err := getEntity(ctx, f, leaf, dirID, f.opt.Token)
	if err != nil {
		return nil, err
	}
	o := &Object{
		fs:     f,
		remote: remote,
	}
	o.set(entity)
	return o, nil
}
|
|
|
|
|
|
|
|
// Mkdir makes the directory (container, bucket)
|
|
|
|
//
|
|
|
|
// Shouldn't return an error if it already exists
|
|
|
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
2023-11-17 17:28:05 +08:00
|
|
|
_, err := f.dirCache.FindDir(ctx, dir, true)
|
|
|
|
return err
|
|
|
|
}
|
2023-10-20 19:35:56 +08:00
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|
|
|
if check {
|
|
|
|
entries, err := f.List(ctx, dir)
|
2023-10-20 19:35:56 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-11-17 17:28:05 +08:00
|
|
|
if len(entries) != 0 {
|
|
|
|
return fs.ErrorDirectoryNotEmpty
|
2023-10-20 19:35:56 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
2023-10-20 19:35:56 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
opts := &rest.Opts{
|
|
|
|
Method: "GET",
|
|
|
|
RootURL: linkboxAPIURL,
|
|
|
|
Path: "folder_del",
|
|
|
|
Parameters: url.Values{
|
|
|
|
"token": {f.opt.Token},
|
2023-11-17 17:28:05 +08:00
|
|
|
"dirIds": {directoryID},
|
2023-10-20 19:35:56 +08:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
response := response{}
|
2023-10-20 19:35:56 +08:00
|
|
|
err = getUnmarshaledResponse(ctx, f, opts, &response)
|
|
|
|
if err != nil {
|
2023-11-17 17:28:05 +08:00
|
|
|
// Linkbox has some odd error returns here
|
|
|
|
if response.Status == 403 || response.Status == 500 {
|
|
|
|
return fs.ErrorDirNotFound
|
|
|
|
}
|
|
|
|
return fmt.Errorf("purge error: %w", err)
|
2023-10-20 19:35:56 +08:00
|
|
|
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
f.dirCache.FlushDir(dir)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2023-10-20 19:35:56 +08:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// check=true refuses to delete a non-empty directory.
	return f.purgeCheck(ctx, dir, true)
}
|
|
|
|
|
|
|
|
// SetModTime sets modTime on a particular file
//
// Linkbox has no API for this, so it always returns
// fs.ErrorCantSetModTime.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}
|
|
|
|
|
|
|
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// If the download URL has not been cached yet it is looked up first via
// getEntity on the object's parent directory.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	var res *http.Response
	downloadURL := o.fullURL
	if downloadURL == "" {
		_, name := splitDirAndName(o.Remote())
		newObject, err := getEntity(ctx, o.fs, name, itoa64(o.dirID), o.fs.opt.Token)
		if err != nil {
			return nil, err
		}
		if newObject == nil {
			// fs.Debugf(o.fs, "Open entity is empty: name = {%s}", name)
			return nil, fs.ErrorObjectNotFound
		}

		downloadURL = newObject.URL
	}

	opts := &rest.Opts{
		Method:  "GET",
		RootURL: downloadURL,
		Options: options, // pass range/other open options through to the HTTP request
	}

	// Retry transient failures under the pacer.
	err := o.fs.pacer.Call(func() (bool, error) {
		var err error
		res, err = o.fs.srv.Call(ctx, opts)
		return o.fs.shouldRetry(ctx, res, err)
	})

	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
	}

	// Caller is responsible for closing the body.
	return res.Body, nil
}
|
|
|
|
|
|
|
|
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
//
// The upload protocol is:
//  1. remove any existing file with this name
//  2. ask get_upload_url for a signed URL, keyed by the MD5 of the
//     first 10 MiB and the file size (status 600 means the content is
//     already known server-side and needs no upload)
//  3. PUT the file body to the signed URL (status 1 only)
//  4. call folder_upload_file to create the file entry
//  5. poll getEntity until the new entry becomes visible
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	if size == 0 {
		return fs.ErrorCantUploadEmptyFiles
	} else if size < 0 {
		return fmt.Errorf("can't upload files of unknown length")
	}

	remote := o.Remote()

	// remove the file if it exists
	if o.itemID != "" {
		fs.Debugf(o, "Update: removing old file")
		err = o.Remove(ctx)
		if err != nil {
			fs.Errorf(o, "Update: failed to remove existing file: %v", err)
		}
		o.itemID = ""
	} else {
		// NOTE: err is deliberately shadowed (":=") in this branch — a
		// lookup/removal failure here is logged but never propagated.
		tmpObject, err := o.fs.NewObject(ctx, remote)
		if err == nil {
			fs.Debugf(o, "Update: removing old file")
			err = tmpObject.Remove(ctx)
			if err != nil {
				fs.Errorf(o, "Update: failed to remove existing file: %v", err)
			}
		}
	}

	// Buffer the first 10 MiB so its MD5 can be sent to the API; the
	// rest of in is streamed during the PUT below.
	first10m := io.LimitReader(in, 10_485_760)
	first10mBytes, err := io.ReadAll(first10m)
	if err != nil {
		return fmt.Errorf("Update err in reading file: %w", err)
	}

	// get upload authorization (step 1)
	opts := &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "get_upload_url",
		Options: options,
		Parameters: url.Values{
			"token":           {o.fs.opt.Token},
			"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
			"fileSize":        {itoa64(size)},
		},
	}

	getFirstStepResult := getUploadURLResponse{}
	err = getUnmarshaledResponse(ctx, o.fs, opts, &getFirstStepResult)
	if err != nil {
		// Status 600 is not a failure — it means the server already has
		// this content, so fall through to the switch below.
		if getFirstStepResult.Status != 600 {
			return fmt.Errorf("Update err in unmarshaling response: %w", err)
		}
	}

	switch getFirstStepResult.Status {
	case 1:
		// upload file using link from first step
		var res *http.Response

		// Re-join the buffered prefix with the remainder of the input.
		file := io.MultiReader(bytes.NewReader(first10mBytes), in)

		opts := &rest.Opts{
			Method:        "PUT",
			RootURL:       getFirstStepResult.Data.SignURL,
			Options:       options,
			Body:          file,
			ContentLength: &size,
		}

		// CallNoRetry: the body reader can't be rewound, so a retry
		// would send corrupt data.
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			res, err = o.fs.srv.Call(ctx, opts)
			return o.fs.shouldRetry(ctx, res, err)
		})

		if err != nil {
			return fmt.Errorf("update err in uploading file: %w", err)
		}

		// Drain the response body so the connection can be reused.
		_, err = io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("update err in reading response: %w", err)
		}

	case 600:
		// Status means that we don't need to upload file
		// We need only to make second step
	default:
		return fmt.Errorf("got unexpected message from Linkbox: %s", getFirstStepResult.Message)
	}

	leaf, dirID, err := o.fs.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		return err
	}

	// create file item at Linkbox (second step)
	opts = &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "folder_upload_file",
		Options: options,
		Parameters: url.Values{
			"token":           {o.fs.opt.Token},
			"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
			"fileSize":        {itoa64(size)},
			"pid":             {dirID},
			"diyName":         {leaf},
		},
	}

	getSecondStepResult := getUploadURLResponse{}
	err = getUnmarshaledResponse(ctx, o.fs, opts, &getSecondStepResult)
	if err != nil {
		return fmt.Errorf("Update second step failed: %w", err)
	}

	// Try a few times to read the object after upload for eventual consistency
	const maxTries = 10
	var sleepTime = 100 * time.Millisecond
	var entity *entity
	for try := 1; try <= maxTries; try++ {
		entity, err = getEntity(ctx, o.fs, leaf, dirID, o.fs.opt.Token)
		if err == nil {
			break
		}
		if err != fs.ErrorObjectNotFound {
			return fmt.Errorf("Update failed to read object: %w", err)
		}
		fs.Debugf(o, "Trying to read object after upload: try again in %v (%d/%d)", sleepTime, try, maxTries)
		time.Sleep(sleepTime)
		sleepTime *= 2 // exponential backoff
	}
	if err != nil {
		return err
	}
	// Refresh the object's metadata from the freshly-read entity.
	o.set(entity)
	return nil
}
|
|
|
|
|
|
|
|
// Remove this object
//
// Deletes the file identified by o.itemID via the file_del API.
func (o *Object) Remove(ctx context.Context) error {
	opts := &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "file_del",
		Parameters: url.Values{
			"token":   {o.fs.opt.Token},
			"itemIds": {o.itemID},
		},
	}
	// getUploadURLResponse is reused here only for its embedded
	// response (status/message) fields.
	requestResult := getUploadURLResponse{}
	err := getUnmarshaledResponse(ctx, o.fs, opts, &requestResult)
	if err != nil {
		return fmt.Errorf("could not Remove: %w", err)
	}
	return nil
}
|
|
|
|
|
|
|
|
// ModTime returns the modification time of the object
// (the creation time reported by the Linkbox API).
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}
|
|
|
|
|
|
|
|
// Remote returns the path of the object relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}
|
|
|
|
|
|
|
|
// Size returns the size in bytes of the object
func (o *Object) Size() int64 {
	return o.size
}
|
|
|
|
|
|
|
|
// String returns a description of the Object (its remote path),
// or "<nil>" for a nil Object.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
|
|
|
|
|
|
|
|
// Fs returns the filesystem this object is located within
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
|
|
|
|
|
|
|
// Hash returns hash.ErrUnsupported since Linkbox doesn't provide
// checksums of any type.
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
|
|
|
|
|
|
|
|
// Storable returns whether this object can be stored
//
// Always true for Linkbox objects.
func (o *Object) Storable() bool {
	return true
}
|
|
|
|
|
|
|
|
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
|
|
|
|
|
|
|
// Name returns the configured name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
|
|
|
|
|
|
|
// Root returns the root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
|
|
|
|
|
|
|
|
// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Linkbox root '%s'", f.root)
}
|
|
|
|
|
|
|
|
// Precision of the ModTimes in this Fs
//
// Modification times cannot be set (see SetModTime), so this reports
// fs.ModTimeNotSupported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}
|
|
|
|
|
|
|
|
// Hashes returns the supported hash types of the filesystem —
// hash.None, since Linkbox provides no checksums.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
{
|
|
|
|
"data": {
|
|
|
|
"signUrl": "http://xx -- Then CURL PUT your file with sign url "
|
|
|
|
},
|
|
|
|
"msg": "please use this url to upload (PUT method)",
|
|
|
|
"status": 1
|
|
|
|
}
|
|
|
|
*/
|
2023-11-17 17:28:05 +08:00
|
|
|
|
|
|
|
// All messages have these items
//
// Status == 1 indicates success; anything else is an error (see IsError).
type response struct {
	Message string `json:"msg"`
	Status  int    `json:"status"`
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// IsError returns whether response represents an error
// (any Status other than 1).
func (r *response) IsError() bool {
	return r.Status != 1
}
|
|
|
|
|
|
|
|
// Error returns the error state of this response as a string,
// making response usable as an error value.
func (r *response) Error() string {
	return fmt.Sprintf("Linkbox error %d: %s", r.Status, r.Message)
}
|
|
|
|
|
|
|
|
// responser is interface covering the response so we can use it when it is embedded.
type responser interface {
	IsError() bool
	Error() string
}
|
|
|
|
|
2023-10-20 19:35:56 +08:00
|
|
|
// getUploadURLData carries the signed upload URL from get_upload_url.
type getUploadURLData struct {
	SignURL string `json:"signUrl"` // URL to PUT the file body to
}
|
|
|
|
|
|
|
|
// getUploadURLResponse is the response to the get_upload_url API call.
type getUploadURLResponse struct {
	response // common status/message fields
	Data     getUploadURLData `json:"data"`
}
|
|
|
|
|
|
|
|
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: src.Remote(),
		size:   src.Size(),
	}
	// Make sure the parent directory exists before uploading into it.
	dir, _ := splitDirAndName(src.Remote())
	err := f.Mkdir(ctx, dir)
	if err != nil {
		return nil, err
	}
	err = o.Update(ctx, in, src, options...)
	return o, err
}
|
|
|
|
|
|
|
|
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	// check=false deletes the directory even if it is not empty.
	return f.purgeCheck(ctx, dir, false)
}
|
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
|
2023-10-20 19:35:56 +08:00
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// shouldRetry determines whether a given err rates being retried
//
// Context cancellation/deadline errors are never retried; otherwise the
// decision is delegated to fserrors, including the HTTP status codes in
// retryErrorCodes.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
|
2023-10-20 19:35:56 +08:00
|
|
|
|
2023-11-17 17:28:05 +08:00
|
|
|
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
|
|
|
|
|
|
|
|
// Check the interfaces are satisfied at compile time.
var (
	_ fs.Fs             = &Fs{}
	_ fs.Purger         = &Fs{}
	_ fs.DirCacheFlusher = &Fs{}
	_ fs.Object         = &Object{}
)
|