diff --git a/README.md b/README.md
index ddd94d53a..d4cea47f6 100644
--- a/README.md
+++ b/README.md
@@ -94,6 +94,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
  * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
  * Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
  * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
+ * Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
  * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
  * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
  * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
diff --git a/backend/all/all.go b/backend/all/all.go
index e90877eda..f097cb261 100644
--- a/backend/all/all.go
+++ b/backend/all/all.go
@@ -53,6 +53,7 @@ import (
 	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
+	_ "github.com/rclone/rclone/backend/ulozto"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
diff --git a/backend/ulozto/api/types.go b/backend/ulozto/api/types.go
new file mode 100644
index 000000000..6f70e3b0b
--- /dev/null
+++ b/backend/ulozto/api/types.go
@@ -0,0 +1,244 @@
+// Package api has type definitions for uloz.to
+package api
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+// Error is a representation of the JSON structure returned by uloz.to for unsuccessful requests.
+type Error struct {
+	ErrorCode int `json:"error"`
+	StatusCode int `json:"code"`
+	Message string `json:"message"`
+}
+
+// Error implements error.Error() and returns a string representation of the error.
+func (e *Error) Error() string {
+	out := fmt.Sprintf("Error %d (%d)", e.ErrorCode, e.StatusCode)
+	if e.Message != "" {
+		out += ": " + e.Message
+	}
+	return out
+}
+
+// Is determines if the error is an instance of another error. It's required for the
+// errors package to search the causal chain.
+func (e *Error) Is(target error) bool {
+	var err *Error
+	ok := errors.As(target, &err)
+	return ok
+}
+
+// ListResponseMetadata groups fields common for all API List calls,
+// and maps to the Metadata API JSON object.
+type ListResponseMetadata struct {
+	Timestamp time.Time `json:"RunAt"`
+	Offset int32 `json:"offset"`
+	Limit int32 `json:"limit"`
+	ItemsCount int32 `json:"items_count"`
+}
+
+// Folder represents a single folder, and maps to the AggregatePrivateViewFolder
+// JSON API object.
+type Folder struct {
+	Discriminator string `json:"discriminator"`
+	Name string `json:"name"`
+	SanitizedName string `json:"name_sanitized"`
+	Slug string `json:"slug"`
+	Status string `json:"status"`
+	PublicURL string `json:"public_url"`
+	IsPasswordProtected bool `json:"is_password_protected"`
+	Type string `json:"type"`
+	FileManagerLink string `json:"file_manager_link"`
+	ParentFolderSlug string `json:"parent_folder_slug"`
+	Privacy string `json:"privacy"`
+	Created time.Time `json:"created"`
+	LastUserModified time.Time `json:"last_user_modified"`
+	HasSubfolder bool `json:"has_subfolder"`
+	HasTrashedSubfolders bool `json:"has_trashed_subfolders"`
+}
+
+// File represents a single file, and maps to the AggregatePrivateViewFileV3
+// JSON API object.
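+// Only a handful of these fields (e.g. Name, Slug, Filesize, Description,
+// LastUserModified) are read by the backend; the rest mirror the raw API response.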
+type File struct { + Discriminator string `json:"discriminator"` + Slug string `json:"slug"` + URL string `json:"url"` + Realm string `json:"realm"` + Name string `json:"name"` + NameSanitized string `json:"name_sanitized"` + Extension string `json:"extension"` + Filesize int64 `json:"filesize"` + PasswordProtectedFile bool `json:"password_protected_file"` + Description string `json:"description"` + DescriptionSanitized string `json:"description_sanitized"` + IsPorn bool `json:"is_porn"` + Rating int `json:"rating"` + PasswordProtectedArchive bool `json:"password_protected_archive"` + MalwareStatus string `json:"malware_status"` + ContentStatus string `json:"content_status"` + ContentType string `json:"content_type"` + Format struct { + } `json:"format"` + DownloadTypes []interface{} `json:"download_types"` + ThumbnailInfo []interface{} `json:"thumbnail_info"` + PreviewInfo struct { + } `json:"preview_info"` + Privacy string `json:"privacy"` + IsPornByUploader bool `json:"is_porn_by_uploader"` + ExpireDownload int `json:"expire_download"` + ExpireTime time.Time `json:"expire_time"` + UploadTime time.Time `json:"upload_time"` + LastUserModified time.Time `json:"last_user_modified"` + FolderSlug string `json:"folder_slug"` + IsIncomplete bool `json:"is_incomplete"` + IsInTrash bool `json:"is_in_trash"` + Processing struct { + Identify bool `json:"identify"` + Thumbnails bool `json:"thumbnails"` + LivePreview bool `json:"live_preview"` + ArchiveContent bool `json:"archive_content"` + Preview bool `json:"preview"` + } `json:"processing"` +} + +// CreateFolderRequest represents the JSON API object +// that's sent to the create folder API endpoint. +type CreateFolderRequest struct { + Name string `json:"name"` + ParentFolderSlug string `json:"parent_folder_slug"` +} + +// ListFoldersResponse represents the JSON API object +// that's received from the list folders API endpoint. +type ListFoldersResponse struct { + Metadata ListResponseMetadata `json:"metadata"` + Folder Folder `json:"folder"` + Subfolders []Folder `json:"subfolders"` +} + +// ListFilesResponse represents the JSON API object +// that's received from the list files API endpoint. +type ListFilesResponse struct { + Metadata ListResponseMetadata `json:"metadata"` + Items []File `json:"items"` +} + +// DeleteFoldersRequest represents the JSON API object +// that's sent to the delete folders API endpoint. +type DeleteFoldersRequest struct { + Slugs []string `json:"slugs"` +} + +// CreateUploadURLRequest represents the JSON API object that's +// sent to the API endpoint generating URLs for new file uploads. +type CreateUploadURLRequest struct { + UserLogin string `json:"user_login"` + Realm string `json:"realm"` + ExistingSessionSlug string `json:"private_slug"` +} + +// CreateUploadURLResponse represents the JSON API object that's +// received from the API endpoint generating URLs for new file uploads. +type CreateUploadURLResponse struct { + UploadURL string `json:"upload_url"` + PrivateSlug string `json:"private_slug"` + ValidUntil time.Time `json:"valid_until"` + ValidityInterval int64 `json:"validity_interval"` +} + +// BatchUpdateFilePropertiesRequest represents the JSON API object that's +// sent to the API endpoint moving the uploaded files from a scratch space +// to their final destination. 
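+// UploadTokens maps each uploaded file's slug to the token tying it to the
+// upload session it came from.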
+type BatchUpdateFilePropertiesRequest struct {
+	Name string `json:"name"`
+	FolderSlug string `json:"folder_slug"`
+	Description string `json:"description"`
+	Slugs []string `json:"slugs"`
+	UploadTokens map[string]string `json:"upload_tokens"`
+}
+
+// SendFilePayloadResponse represents the JSON API object that's received
+// in response to uploading a file's body to the CDN URL.
+type SendFilePayloadResponse struct {
+	Size int `json:"size"`
+	ContentType string `json:"contentType"`
+	Md5 string `json:"md5"`
+	Message string `json:"message"`
+	ReturnCode int `json:"return_code"`
+	Slug string `json:"slug"`
+}
+
+// CommitUploadBatchRequest represents the JSON API object that's
+// sent to the API endpoint marking the upload batch as final.
+type CommitUploadBatchRequest struct {
+	Status string `json:"status"`
+	OwnerLogin string `json:"owner_login"`
+}
+
+// CommitUploadBatchResponse represents the JSON API object that's
+// received from the API endpoint marking the upload batch as final.
+type CommitUploadBatchResponse struct {
+	PrivateSlug string `json:"private_slug"`
+	PublicSlug string `json:"public_slug"`
+	Status string `json:"status"`
+	ConfirmedAt time.Time `json:"confirmed_at"`
+	Discriminator string `json:"discriminator"`
+	Privacy string `json:"privacy"`
+	Name time.Time `json:"name"`
+	PublicURL string `json:"public_url"`
+	FilesCountOk int `json:"files_count_ok"`
+	FilesCountTrash int `json:"files_count_trash"`
+	FilesCountIncomplete int `json:"files_count_incomplete"`
+}
+
+// UpdateDescriptionRequest represents the JSON API object that's
+// sent to the file modification API endpoint to update a file's description.
+type UpdateDescriptionRequest struct {
+	Description string `json:"description"`
+}
+
+// GetDownloadLinkRequest represents the JSON API object that's
+// sent to the API endpoint that generates CDN download links for file payloads.
+type GetDownloadLinkRequest struct {
+	Slug string `json:"file_slug"`
+	UserLogin string `json:"user_login"`
+	DeviceID string `json:"device_id"`
+}
+
+// GetDownloadLinkResponse represents the JSON API object that's
+// received from the API endpoint that generates CDN download links for file payloads.
+type GetDownloadLinkResponse struct {
+	Link string `json:"link"`
+	DownloadURLValidUntil time.Time `json:"download_url_valid_until"`
+	DownloadURLValidityInterval int `json:"download_url_validity_interval"`
+	Hash string `json:"hash"`
+}
+
+// AuthenticateRequest represents the JSON API object that's sent to the auth API endpoint.
+type AuthenticateRequest struct {
+	Login string `json:"login"`
+	Password string `json:"password"`
+}
+
+// AuthenticateResponse represents the JSON API object that's received from the auth API endpoint.
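+// TokenID is what the backend sends back in the X-User-Token header on all
+// subsequent requests.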
+type AuthenticateResponse struct { + TokenID string `json:"token_id"` + TokenValidityInterval int `json:"token_validity_interval"` + Session struct { + Country string `json:"country"` + IsLimitedCountry bool `json:"is_limited_country"` + User struct { + Login string `json:"login"` + UserID int64 `json:"user_id"` + Credit int64 `json:"credit"` + AvatarURL string `json:"avatar_url"` + FavoritesLink string `json:"favorites_link"` + RootFolderSlug string `json:"root_folder_slug"` + FavoritesFolderSlug string `json:"favorites_folder_slug"` + HasCloud bool `json:"has_cloud"` + } `json:"user"` + } `json:"session"` +} diff --git a/backend/ulozto/ulozto.go b/backend/ulozto/ulozto.go new file mode 100644 index 000000000..b5033107a --- /dev/null +++ b/backend/ulozto/ulozto.go @@ -0,0 +1,1187 @@ +// Package ulozto provides an interface to the Uloz.to storage system. +package ulozto + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/gob" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/backend/ulozto/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" +) + +// TODO Uloz.to only supports file names of 255 characters or less and silently truncates names that are longer. + +const ( + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 // bigger for slower decay, exponential + rootURL = "https://apis.uloz.to" + // TODO temporary limitation, remove with chunked upload impl + maxFileSizeBytes = 2500 * 1024 * 1024 +) + +// Options defines the configuration for this backend +type Options struct { + AppToken string `config:"app_token"` + Username string `config:"username"` + Password string `config:"password"` + RootFolderSlug string `config:"root_folder_slug"` + Enc encoder.MultiEncoder `config:"encoding"` + ListPageSize int `config:"list_page_size"` +} + +func init() { + fs.Register(&fs.RegInfo{ + Name: "ulozto", + Description: "Uloz.to", + NewFs: NewFs, + Options: []fs.Option{ + { + Name: "app_token", + Default: "", + Help: "The application token identifying the app. An app API key can be either found in the API " + + "doc https://uloz.to/upload-resumable-api-beta or obtained from customer service.", + Sensitive: true, + }, + { + Name: "username", + Default: "", + Help: "The username of the principal to operate as.", + Sensitive: true, + }, + { + Name: "password", + Default: "", + Help: "The password for the user.", + Sensitive: true, + }, + { + Name: "root_folder_slug", + Help: "If set, rclone will use this folder as the root folder for all operations. For example, " + + "if the slug identifies 'foo/bar/', 'ulozto:baz' is equivalent to 'ulozto:foo/bar/baz' without " + + "any root slug set.", + Default: "", + Advanced: true, + Sensitive: true, + }, + { + Name: "list_page_size", + Default: 500, + Help: "The size of a single page for list commands. 
1-500", + Advanced: true, + }, + { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeBackSlash, + }, + }}) +} + +// Fs represents a remote uloz.to storage +type Fs struct { + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + rest *rest.Client // REST client with authentication headers set, used to communicate with API endpoints + cdn *rest.Client // REST client without authentication headers set, used for CDN payload upload/download + dirCache *dircache.DirCache // Map of directory path to directory id + pacer *fs.Pacer // pacer for API calls +} + +// NewFs constructs a Fs from the path, container:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + client := fshttp.NewClient(ctx) + + f := &Fs{ + name: name, + root: root, + opt: *opt, + cdn: rest.NewClient(client), + rest: rest.NewClient(client).SetRoot(rootURL), + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + } + f.features = (&fs.Features{ + DuplicateFiles: true, + CanHaveEmptyDirectories: true, + }).Fill(ctx, f) + f.rest.SetErrorHandler(errorHandler) + + f.rest.SetHeader("X-Auth-Token", f.opt.AppToken) + + auth, err := f.authenticate(ctx) + + if err != nil { + return f, err + } + + var rootSlug string + if opt.RootFolderSlug == "" { + rootSlug = auth.Session.User.RootFolderSlug + } else { + rootSlug = opt.RootFolderSlug + } + + f.dirCache = dircache.New(root, rootSlug, f) + + err = f.dirCache.FindRoot(ctx, false) + + if errors.Is(err, fs.ErrorDirNotFound) { + // All good, we'll create the folder later on. + return f, nil + } + + if errors.Is(err, fs.ErrorIsFile) { + rootFolder, _ := dircache.SplitPath(root) + f.root = rootFolder + f.dirCache = dircache.New(rootFolder, rootSlug, f) + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + return f, err + } + return f, fs.ErrorIsFile + } + + return f, err +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + // Decode error response + errResponse := new(api.Error) + err := rest.DecodeJSON(resp, &errResponse) + if err != nil { + fs.Debugf(nil, "Couldn't decode error response: %v", err) + } + if errResponse.StatusCode == 0 { + errResponse.StatusCode = resp.StatusCode + } + + return errors.WithStack(errResponse) +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + // TODO: random 500s should be retried but the error code corresponds to a known issue with uploading large files, + // leading to numerous (slow & resource consuming) retries. Don't retry them until the root cause is addressed. + // 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout +} + +// shouldRetry returns a boolean whether this resp and err should be retried. +// It also returns the err for convenience. 
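+// When reauth is true, an expired token (HTTP 401 with API error code 70001)
+// triggers a re-authentication before the request is retried.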
+func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, reauth bool) (bool, error) { + if err == nil { + return false, nil + } + + if fserrors.ContextError(ctx, &err) { + return false, err + } + + var apiErr *api.Error + if resp != nil && resp.StatusCode == 401 && errors.As(err, &apiErr) && apiErr.ErrorCode == 70001 { + fs.Debugf(nil, "Should retry: %v", err) + + if reauth { + _, err = f.authenticate(ctx) + if err != nil { + return false, err + } + } + + return true, err + } + + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +func (f *Fs) authenticate(ctx context.Context) (response *api.AuthenticateResponse, err error) { + // TODO only reauth once if the token expires + + // Remove the old user token + f.rest.RemoveHeader("X-User-Token") + + opts := rest.Opts{ + Method: "PUT", + Path: "/v6/session", + } + + authRequest := api.AuthenticateRequest{ + Login: f.opt.Username, + Password: f.opt.Password, + } + + err = f.pacer.Call(func() (bool, error) { + httpResp, err := f.rest.CallJSON(ctx, &opts, &authRequest, &response) + return f.shouldRetry(ctx, httpResp, err, false) + }) + + if err != nil { + return nil, err + } + + f.rest.SetHeader("X-User-Token", response.TokenID) + + return response, nil +} + +// UploadSession represents a single Uloz.to upload session. +// +// Uloz.to supports uploading multiple files at once and committing them atomically. This functionality isn't being used +// by the backend implementation and for simplicity, each session corresponds to a single file being uploaded. +type UploadSession struct { + Filesystem *Fs + URL string + PrivateSlug string + ValidUntil time.Time +} + +func (f *Fs) createUploadSession(ctx context.Context) (session *UploadSession, err error) { + session = &UploadSession{ + Filesystem: f, + } + + err = session.renewUploadSession(ctx) + if err != nil { + return nil, err + } + + return session, nil +} + +func (session *UploadSession) renewUploadSession(ctx context.Context) error { + opts := rest.Opts{ + Method: "POST", + Path: "/v5/upload/link", + Parameters: url.Values{}, + } + + createUploadURLReq := api.CreateUploadURLRequest{ + UserLogin: session.Filesystem.opt.Username, + Realm: "ulozto", + } + + if session.PrivateSlug != "" { + createUploadURLReq.ExistingSessionSlug = session.PrivateSlug + } + + var err error + var response api.CreateUploadURLResponse + + err = session.Filesystem.pacer.Call(func() (bool, error) { + httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &createUploadURLReq, &response) + return session.Filesystem.shouldRetry(ctx, httpResp, err, true) + }) + + if err != nil { + return err + } + + session.PrivateSlug = response.PrivateSlug + session.URL = response.UploadURL + session.ValidUntil = response.ValidUntil + + return nil +} + +func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info fs.ObjectInfo, payload io.Reader) (fs.Object, error) { + session, err := f.createUploadSession(ctx) + + if err != nil { + return nil, err + } + + hashes := hash.NewHashSet(hash.MD5, hash.SHA256) + hasher, err := hash.NewMultiHasherTypes(hashes) + + if err != nil { + return nil, err + } + + payload = io.TeeReader(payload, hasher) + + encodedName := f.opt.Enc.FromStandardName(name) + + opts := rest.Opts{ + Method: "POST", + Body: payload, + // Not using Parameters as the session URL has parameters itself + RootURL: session.URL + "&batch_file_id=1&is_porn=false", + MultipartContentName: "file", + MultipartFileName: encodedName, + Parameters: 
url.Values{},
+	}
+	var uploadResponse api.SendFilePayloadResponse
+
+	err = f.pacer.CallNoRetry(func() (bool, error) {
+		httpResp, err := f.cdn.CallJSON(ctx, &opts, nil, &uploadResponse)
+		return f.shouldRetry(ctx, httpResp, err, true)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	sha256digest, err := hasher.Sum(hash.SHA256)
+	if err != nil {
+		return nil, err
+	}
+
+	md5digest, err := hasher.Sum(hash.MD5)
+	if err != nil {
+		return nil, err
+	}
+
+	if hex.EncodeToString(md5digest) != uploadResponse.Md5 {
+		return nil, errors.New("MD5 digest mismatch")
+	}
+
+	metadata := DescriptionEncodedMetadata{
+		Md5Hash: md5digest,
+		Sha256Hash: sha256digest,
+		ModTimeEpochMicros: info.ModTime(ctx).UnixMicro(),
+	}
+
+	encodedMetadata, err := metadata.encode()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Successfully uploaded, now move the file where it belongs and commit it
+	updateReq := api.BatchUpdateFilePropertiesRequest{
+		Name: encodedName,
+		FolderSlug: parentSlug,
+		Description: encodedMetadata,
+		Slugs: []string{uploadResponse.Slug},
+		UploadTokens: map[string]string{uploadResponse.Slug: session.PrivateSlug + ":1"},
+	}
+
+	var updateResponse []api.File
+
+	opts = rest.Opts{
+		Method: "PATCH",
+		Path: "/v8/file-list/private",
+		Parameters: url.Values{},
+	}
+
+	err = f.pacer.Call(func() (bool, error) {
+		httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &updateReq, &updateResponse)
+		return f.shouldRetry(ctx, httpResp, err, true)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if len(updateResponse) != 1 {
+		return nil, errors.New("unexpected number of files in the response")
+	}
+
+	opts = rest.Opts{
+		Method: "PATCH",
+		Path: "/v8/upload-batch/private/" + session.PrivateSlug,
+		Parameters: url.Values{},
+	}
+
+	commitRequest := api.CommitUploadBatchRequest{
+		Status: "confirmed",
+		OwnerLogin: f.opt.Username,
+	}
+
+	var commitResponse api.CommitUploadBatchResponse
+
+	err = f.pacer.Call(func() (bool, error) {
+		httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &commitRequest, &commitResponse)
+		return f.shouldRetry(ctx, httpResp, err, true)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	file, err := f.newObjectWithInfo(ctx, info.Remote(), &updateResponse[0])
+
+	return file, err
+}
+
+// Put implements the mandatory method fs.Fs.Put.
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	// TODO: workaround for uloz.to's bug. Remove when chunked upload support is implemented.
+	if src.Size() > maxFileSizeBytes {
+		return nil, errors.New("file size over the supported max threshold")
+	}
+	existingObj, err := f.NewObject(ctx, src.Remote())
+
+	switch {
+	case err == nil:
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case errors.Is(err, fs.ErrorObjectNotFound):
+		// Not found so create it
+		return f.PutUnchecked(ctx, in, src, options...)
+	default:
+		return nil, err
+	}
+}
+
+// PutUnchecked implements the optional interface fs.PutUncheckeder.
+//
+// Uloz.to allows multiple files with the same name to live in the same folder.
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	filename, folderSlug, err := f.dirCache.FindPath(ctx, src.Remote(), true)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return f.uploadUnchecked(ctx, filename, folderSlug, src, in)
+}
+
+// Mkdir implements the mandatory method fs.Fs.Mkdir.
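+// Any missing parent folders are created as well, since the dir cache resolves
+// the whole path with create set to true.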
+func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
+	_, err = f.dirCache.FindDir(ctx, dir, true)
+	return err
+}
+
+func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error) {
+	folders, err := f.fetchListFolderPage(ctx, slug, "", 1, 0)
+
+	if err != nil {
+		return false, err
+	}
+
+	if len(folders) > 0 {
+		return false, nil
+	}
+
+	files, err := f.fetchListFilePage(ctx, slug, "", 1, 0)
+
+	if err != nil {
+		return false, err
+	}
+
+	if len(files) > 0 {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// Rmdir implements the mandatory method fs.Fs.Rmdir.
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	slug, err := f.dirCache.FindDir(ctx, dir, false)
+
+	if err != nil {
+		return err
+	}
+
+	empty, err := f.isDirEmpty(ctx, slug)
+
+	if err != nil {
+		return err
+	}
+
+	if !empty {
+		return fs.ErrorDirectoryNotEmpty
+	}
+
+	opts := rest.Opts{
+		Method: "DELETE",
+		Path: "/v5/user/" + f.opt.Username + "/folder-list",
+	}
+
+	req := api.DeleteFoldersRequest{Slugs: []string{slug}}
+	err = f.pacer.Call(func() (bool, error) {
+		httpResp, err := f.rest.CallJSON(ctx, &opts, req, nil)
+		return f.shouldRetry(ctx, httpResp, err, true)
+	})
+
+	if err != nil {
+		return err
+	}
+
+	f.dirCache.FlushDir(dir)
+
+	return nil
+}
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+	return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+	return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+	return fmt.Sprintf("uloz.to root '%s'", f.root)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+	return f.features
+}
+
+// Precision returns the precision of this Fs
+func (f *Fs) Precision() time.Duration {
+	return time.Microsecond
+}
+
+// Hashes implements fs.Fs.Hashes by returning the supported hash types of the filesystem.
+func (f *Fs) Hashes() hash.Set {
+	return hash.NewHashSet(hash.SHA256, hash.MD5)
+}
+
+// DescriptionEncodedMetadata represents a set of metadata encoded as Uloz.to description.
+//
+// Uloz.to doesn't support setting metadata such as mtime but allows the user to set an arbitrary description field.
+// The content of this structure will be serialized and stored in the backend.
+//
+// The files themselves are immutable, so there's no danger that a file changes and we forget to update the hashes.
+// It is theoretically possible to rewrite the description to provide incorrect information for a file. However, for
+// that to be a real attack vector, a nefarious person would already need write access to the remote, and the
+// situation would be above rclone's pay grade anyway.
+type DescriptionEncodedMetadata struct {
+	Md5Hash []byte // The MD5 hash of the file
+	Sha256Hash []byte // The SHA256 hash of the file
+	ModTimeEpochMicros int64 // The mtime of the file, as set by rclone
+}
+
+func (md *DescriptionEncodedMetadata) encode() (string, error) {
+	b := bytes.Buffer{}
+	e := gob.NewEncoder(&b)
+	err := e.Encode(md)
+	if err != nil {
+		return "", err
+	}
+	// Version the encoded string from the beginning even though we don't need it yet.
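+	// decodeDescriptionMetadata rejects unknown version prefixes, so the format can evolve safely.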
+	return "1;" + base64.StdEncoding.EncodeToString(b.Bytes()), nil
+}
+
+func decodeDescriptionMetadata(str string) (*DescriptionEncodedMetadata, error) {
+	// The encoded data starts with a version number which is not a part of the serialized object
+	spl := strings.SplitN(str, ";", 2)
+
+	if len(spl) < 2 || spl[0] != "1" {
+		return nil, errors.New("can't decode, unknown encoded metadata version")
+	}
+
+	m := DescriptionEncodedMetadata{}
+	by, err := base64.StdEncoding.DecodeString(spl[1])
+	if err != nil {
+		return nil, err
+	}
+	b := bytes.Buffer{}
+	b.Write(by)
+	d := gob.NewDecoder(&b)
+	err = d.Decode(&m)
+	if err != nil {
+		return nil, err
+	}
+	return &m, nil
+}
+
+// Object describes an uloz.to object.
+//
+// Valid objects will always have all fields but encodedMetadata set.
+type Object struct {
+	fs *Fs // what this object is part of
+	remote string // The remote path
+	name string // The file name
+	size int64 // size of the object
+	slug string // ID of the object
+	remoteFsMtime time.Time // The time the object was last modified in the remote fs.
+	// Metadata not available natively and encoded in the description field. May not be present if the encoded metadata
+	// is not present (e.g. if the file wasn't uploaded by rclone) or invalid.
+	encodedMetadata *DescriptionEncodedMetadata
+}
+
+// Storable implements the mandatory method fs.ObjectInfo.Storable
+func (o *Object) Storable() bool {
+	return true
+}
+
+func (o *Object) updateFileProperties(ctx context.Context, req interface{}) (err error) {
+	var resp *api.File
+
+	opts := rest.Opts{
+		Method: "PATCH",
+		Path: "/v8/file/" + o.slug + "/private",
+	}
+
+	err = o.fs.pacer.Call(func() (bool, error) {
+		httpResp, err := o.fs.rest.CallJSON(ctx, &opts, &req, &resp)
+		return o.fs.shouldRetry(ctx, httpResp, err, true)
+	})
+
+	if err != nil {
+		return err
+	}
+
+	return o.setMetaData(resp)
+}
+
+// SetModTime implements the mandatory method fs.Object.SetModTime
+func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
+	var newMetadata DescriptionEncodedMetadata
+	if o.encodedMetadata == nil {
+		newMetadata = DescriptionEncodedMetadata{}
+	} else {
+		newMetadata = *o.encodedMetadata
+	}
+
+	newMetadata.ModTimeEpochMicros = t.UnixMicro()
+	encoded, err := newMetadata.encode()
+	if err != nil {
+		return err
+	}
+	return o.updateFileProperties(ctx, api.UpdateDescriptionRequest{
+		Description: encoded,
+	})
+}
+
+// Open implements the mandatory method fs.Object.Open
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+	opts := rest.Opts{
+		Method: "POST",
+		Path: "/v5/file/download-link/vipdata",
+	}
+
+	req := &api.GetDownloadLinkRequest{
+		Slug: o.slug,
+		UserLogin: o.fs.opt.Username,
+		// Has to be set but doesn't seem to be used server side.
+		DeviceID: "foobar",
+	}
+
+	var resp *api.GetDownloadLinkResponse
+
+	err = o.fs.pacer.Call(func() (bool, error) {
+		httpResp, err := o.fs.rest.CallJSON(ctx, &opts, &req, &resp)
+		return o.fs.shouldRetry(ctx, httpResp, err, true)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	opts = rest.Opts{
+		Method: "GET",
+		RootURL: resp.Link,
+		Options: options,
+	}
+
+	var httpResp *http.Response
+
+	err = o.fs.pacer.Call(func() (bool, error) {
+		httpResp, err = o.fs.cdn.Call(ctx, &opts)
+		return o.fs.shouldRetry(ctx, httpResp, err, true)
+	})
+	if err != nil {
+		return nil, err
+	}
+	return httpResp.Body, err
+}
+
+func (o *Object) copyFrom(other *Object) {
+	o.fs = other.fs
+	o.remote = other.remote
+	o.size = other.size
+	o.slug = other.slug
+	o.remoteFsMtime = other.remoteFsMtime
+	o.encodedMetadata = other.encodedMetadata
+}
+
+// RenamingObjectInfoProxy is a delegating proxy for fs.ObjectInfo
+// with the option of specifying a different remote path.
+type RenamingObjectInfoProxy struct {
+	delegate fs.ObjectInfo
+	remote string
+}
+
+// String implements fs.ObjectInfo.String by delegating to the wrapped instance.
+func (s *RenamingObjectInfoProxy) String() string {
+	return s.delegate.String()
+}
+
+// Remote implements fs.ObjectInfo.Remote by returning the specified remote path.
+func (s *RenamingObjectInfoProxy) Remote() string {
+	return s.remote
+}
+
+// ModTime implements fs.ObjectInfo.ModTime by delegating to the wrapped instance.
+func (s *RenamingObjectInfoProxy) ModTime(ctx context.Context) time.Time {
+	return s.delegate.ModTime(ctx)
+}
+
+// Size implements fs.ObjectInfo.Size by delegating to the wrapped instance.
+func (s *RenamingObjectInfoProxy) Size() int64 {
+	return s.delegate.Size()
+}
+
+// Fs implements fs.ObjectInfo.Fs by delegating to the wrapped instance.
+func (s *RenamingObjectInfoProxy) Fs() fs.Info {
+	return s.delegate.Fs()
+}
+
+// Hash implements fs.ObjectInfo.Hash by delegating to the wrapped instance.
+func (s *RenamingObjectInfoProxy) Hash(ctx context.Context, ty hash.Type) (string, error) {
+	return s.delegate.Hash(ctx, ty)
+}
+
+// Storable implements fs.ObjectInfo.Storable by delegating to the wrapped instance.
+func (s *RenamingObjectInfoProxy) Storable() bool {
+	return s.delegate.Storable()
+}
+
+// Update implements the mandatory method fs.Object.Update
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	// The backend allows storing multiple files with the same name, so we simply upload the new file and remove
+	// the old one afterwards.
+	info := &RenamingObjectInfoProxy{
+		delegate: src,
+		remote: o.Remote(),
+	}
+	newo, err := o.fs.PutUnchecked(ctx, in, info, options...)
+
+	if err != nil {
+		return err
+	}
+
+	err = o.Remove(ctx)
+	if err != nil {
+		return err
+	}
+
+	o.copyFrom(newo.(*Object))
+
+	return nil
+}
+
+// Remove implements the mandatory method fs.Object.Remove
+func (o *Object) Remove(ctx context.Context) error {
+	for i := 0; i < 2; i++ {
+		// The first call moves the item to the recycle bin, the second deletes it for good
+		var err error
+		opts := rest.Opts{
+			Method: "DELETE",
+			Path: "/v6/file/" + o.slug + "/private",
+		}
+		err = o.fs.pacer.Call(func() (bool, error) {
+			httpResp, err := o.fs.rest.CallJSON(ctx, &opts, nil, nil)
+			return o.fs.shouldRetry(ctx, httpResp, err, true)
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ModTime implements the mandatory method fs.Object.ModTime
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	if o.encodedMetadata != nil {
+		return time.UnixMicro(o.encodedMetadata.ModTimeEpochMicros)
+	}
+
+	// The time the object was last modified on the server - a handwavy guess, but we don't have anything better
+	return o.remoteFsMtime
+
+}
+
+// Fs implements the mandatory method fs.Object.Fs
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// String returns the string representation of the remote object reference.
+func (o *Object) String() string {
+	if o == nil {
+		return ""
+	}
+	return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+	return o.size
+}
+
+// Hash implements the mandatory method fs.Object.Hash.
+//
+// Supports SHA256 and MD5 hashes.
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	if t != hash.MD5 && t != hash.SHA256 {
+		return "", hash.ErrUnsupported
+	}
+
+	if o.encodedMetadata == nil {
+		return "", nil
+	}
+
+	switch t {
+	case hash.MD5:
+		return hex.EncodeToString(o.encodedMetadata.Md5Hash), nil
+	case hash.SHA256:
+		return hex.EncodeToString(o.encodedMetadata.Sha256Hash), nil
+	}
+
+	panic("Should never get here")
+}
+
+// FindLeaf implements dircache.DirCacher.FindLeaf by successively walking through the folder hierarchy until
+// the desired folder is found, or there's nowhere to continue.
+func (f *Fs) FindLeaf(ctx context.Context, folderSlug, leaf string) (leafSlug string, found bool, err error) {
+	folders, err := f.listFolders(ctx, folderSlug, leaf)
+	if err != nil {
+		if errors.Is(err, fs.ErrorDirNotFound) {
+			return "", false, nil
+		}
+		return "", false, err
+	}
+
+	for _, folder := range folders {
+		if folder.Name == leaf {
+			return folder.Slug, true, nil
+		}
+	}
+
+	// Uloz.to allows creation of multiple files / folders with the same name in the same parent folder. rclone always
+	// expects folder paths to be unique (no other file or folder with the same name should exist). As a result we also
+	// need to look at the files to return the correct error if necessary.
+	files, err := f.listFiles(ctx, folderSlug, leaf)
+	if err != nil {
+		return "", false, err
+	}
+
+	for _, file := range files {
+		if file.Name == leaf {
+			return "", false, fs.ErrorIsFile
+		}
+	}
+
+	// The parent folder exists but no file or folder with the given name was found in it.
+	return "", false, nil
+}
+
+// CreateDir implements dircache.DirCacher.CreateDir by creating a folder with the given name under a folder identified
+// by parentSlug.
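+// The leaf name is run through the configured encoder before it's sent to the API.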
+func (f *Fs) CreateDir(ctx context.Context, parentSlug, leaf string) (newID string, err error) {
+	var folder *api.Folder
+	opts := rest.Opts{
+		Method: "POST",
+		Path: "/v6/user/" + f.opt.Username + "/folder",
+		Parameters: url.Values{},
+	}
+	mkdir := api.CreateFolderRequest{
+		Name: f.opt.Enc.FromStandardName(leaf),
+		ParentFolderSlug: parentSlug,
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		httpResp, err := f.rest.CallJSON(ctx, &opts, &mkdir, &folder)
+		return f.shouldRetry(ctx, httpResp, err, true)
+	})
+	if err != nil {
+		return "", err
+	}
+	return folder.Slug, nil
+}
+
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (*Object, error) {
+	o := &Object{
+		fs: f,
+		remote: remote,
+	}
+	var err error
+
+	if info == nil {
+		info, err = f.readMetaDataForPath(ctx, remote)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	err = o.setMetaData(info)
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
+}
+
+// readMetaDataForPath reads the metadata from the path
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.File, err error) {
+	filename, folderSlug, err := f.dirCache.FindPath(ctx, path, false)
+	if err != nil {
+		if errors.Is(err, fs.ErrorDirNotFound) {
+			return nil, fs.ErrorObjectNotFound
+		}
+		return nil, err
+	}
+
+	files, err := f.listFiles(ctx, folderSlug, filename)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, file := range files {
+		if file.Name == filename {
+			return &file, nil
+		}
+	}
+
+	folders, err := f.listFolders(ctx, folderSlug, filename)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, folder := range folders {
+		if folder.Name == filename {
+			return nil, fs.ErrorIsDir
+		}
+	}
+
+	return nil, fs.ErrorObjectNotFound
+}
+
+func (o *Object) setMetaData(info *api.File) (err error) {
+	o.name = info.Name
+	o.size = info.Filesize
+	o.remoteFsMtime = info.LastUserModified
+	o.encodedMetadata, err = decodeDescriptionMetadata(info.Description)
+	if err != nil {
+		fs.Debugf(o, "Couldn't decode metadata: %v", err)
+	}
+	o.slug = info.Slug
+	return nil
+}
+
+// NewObject implements fs.Fs.NewObject.
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(ctx, remote, nil)
+}
+
+// List implements fs.Fs.List by listing all files and folders in the given folder.
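+// Folder slugs discovered along the way are stored in the dir cache to speed up later lookups.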
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + folderSlug, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return nil, err + } + + folders, err := f.listFolders(ctx, folderSlug, "") + if err != nil { + return nil, err + } + + for _, folder := range folders { + remote := path.Join(dir, folder.Name) + f.dirCache.Put(remote, folder.Slug) + entries = append(entries, fs.NewDir(remote, folder.LastUserModified)) + } + + files, err := f.listFiles(ctx, folderSlug, "") + if err != nil { + return nil, err + } + + for _, file := range files { + remote := path.Join(dir, file.Name) + remoteFile, err := f.newObjectWithInfo(ctx, remote, &file) + if err != nil { + return nil, err + } + entries = append(entries, remoteFile) + } + + return entries, nil +} + +func (f *Fs) fetchListFolderPage( + ctx context.Context, + folderSlug string, + searchQuery string, + limit int, + offset int) (folders []api.Folder, err error) { + + opts := rest.Opts{ + Method: "GET", + Path: "/v9/user/" + f.opt.Username + "/folder/" + folderSlug + "/folder-list", + Parameters: url.Values{}, + } + + opts.Parameters.Set("status", "ok") + opts.Parameters.Set("limit", strconv.Itoa(limit)) + if offset > 0 { + opts.Parameters.Set("offset", strconv.Itoa(offset)) + } + + if searchQuery != "" { + opts.Parameters.Set("search_query", f.opt.Enc.FromStandardName(searchQuery)) + } + + var respBody *api.ListFoldersResponse + + err = f.pacer.Call(func() (bool, error) { + httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody) + return f.shouldRetry(ctx, httpResp, err, true) + }) + + if err != nil { + return nil, err + } + + for i := range respBody.Subfolders { + respBody.Subfolders[i].Name = f.opt.Enc.ToStandardName(respBody.Subfolders[i].Name) + } + + return respBody.Subfolders, nil +} + +func (f *Fs) listFolders( + ctx context.Context, + folderSlug string, + searchQuery string) (folders []api.Folder, err error) { + + targetPageSize := f.opt.ListPageSize + lastPageSize := targetPageSize + offset := 0 + + for targetPageSize == lastPageSize { + page, err := f.fetchListFolderPage(ctx, folderSlug, searchQuery, targetPageSize, offset) + if err != nil { + var apiErr *api.Error + casted := errors.As(err, &apiErr) + if casted && apiErr.ErrorCode == 30001 { + return nil, fs.ErrorDirNotFound + } + return nil, err + } + lastPageSize = len(page) + offset += lastPageSize + folders = append(folders, page...) 
+	}
+
+	return folders, nil
+}
+
+func (f *Fs) fetchListFilePage(
+	ctx context.Context,
+	folderSlug string,
+	searchQuery string,
+	limit int,
+	offset int) (files []api.File, err error) {
+
+	opts := rest.Opts{
+		Method: "GET",
+		Path: "/v8/user/" + f.opt.Username + "/folder/" + folderSlug + "/file-list",
+		Parameters: url.Values{},
+	}
+	opts.Parameters.Set("status", "ok")
+	opts.Parameters.Set("limit", strconv.Itoa(limit))
+	if offset > 0 {
+		opts.Parameters.Set("offset", strconv.Itoa(offset))
+	}
+
+	if searchQuery != "" {
+		opts.Parameters.Set("search_query", f.opt.Enc.FromStandardName(searchQuery))
+	}
+
+	var respBody *api.ListFilesResponse
+
+	err = f.pacer.Call(func() (bool, error) {
+		httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody)
+		return f.shouldRetry(ctx, httpResp, err, true)
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("couldn't list files: %w", err)
+	}
+
+	for i := range respBody.Items {
+		respBody.Items[i].Name = f.opt.Enc.ToStandardName(respBody.Items[i].Name)
+	}
+
+	return respBody.Items, nil
+}
+
+func (f *Fs) listFiles(
+	ctx context.Context,
+	folderSlug string,
+	searchQuery string) (files []api.File, err error) {
+
+	targetPageSize := f.opt.ListPageSize
+	lastPageSize := targetPageSize
+	offset := 0
+
+	for targetPageSize == lastPageSize {
+		page, err := f.fetchListFilePage(ctx, folderSlug, searchQuery, targetPageSize, offset)
+		if err != nil {
+			return nil, err
+		}
+		lastPageSize = len(page)
+		offset += lastPageSize
+		files = append(files, page...)
+	}
+
+	return files, nil
+}
+
+// DirCacheFlush implements the optional fs.DirCacheFlusher interface.
+func (f *Fs) DirCacheFlush() {
+	f.dirCache.ResetRoot()
+}
+
+// Check the interfaces are satisfied
+var (
+	_ fs.Fs = (*Fs)(nil)
+	_ dircache.DirCacher = (*Fs)(nil)
+	_ fs.DirCacheFlusher = (*Fs)(nil)
+	_ fs.PutUncheckeder = (*Fs)(nil)
+	_ fs.Object = (*Object)(nil)
+	_ fs.ObjectInfo = (*RenamingObjectInfoProxy)(nil)
+)
diff --git a/backend/ulozto/ulozto_test.go b/backend/ulozto/ulozto_test.go
new file mode 100644
index 000000000..99507fc24
--- /dev/null
+++ b/backend/ulozto/ulozto_test.go
@@ -0,0 +1,117 @@
+package ulozto
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/rclone/rclone/backend/ulozto/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/object"
+	"github.com/rclone/rclone/fs/operations"
+	"github.com/rclone/rclone/fstest"
+	"github.com/stretchr/testify/require"
+
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestUlozto:",
+		NilObject: (*Object)(nil),
+	})
+}
+
+// TestListWithoutMetadata verifies that basic operations can be performed even if the remote file wasn't written by
+// rclone, or the serialized metadata can't be read.
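+// A file not managed by rclone is simulated by wiping the description field that holds the encoded metadata.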
+func TestListWithoutMetadata(t *testing.T) { + const ( + remoteName = "TestUlozto:" + payload = "42foobar42" + sha256 = "d41f400003e93eb0891977f525e73ecedfa04272d2036f6137106168ecb196ab" + md5 = "8ad32cfeb3dc0f5092261268f335e0a5" + filesize = len(payload) + ) + ctx := context.Background() + fstest.Initialise() + subRemoteName, subRemoteLeaf, err := fstest.RandomRemoteName(remoteName) + require.NoError(t, err) + f, err := fs.NewFs(ctx, subRemoteName) + if errors.Is(err, fs.ErrorNotFoundInConfigFile) { + t.Logf("Didn't find %q in config file - skipping tests", remoteName) + return + } + require.NoError(t, err) + + file := fstest.Item{ModTime: time.UnixMilli(123456789), Path: subRemoteLeaf, Size: int64(filesize), Hashes: map[hash.Type]string{ + hash.SHA256: sha256, + hash.MD5: md5, + }} + + // Create a file with the given content and metadata + obj := fstests.PutTestContents(ctx, t, f, &file, payload, false) + + // Verify the file has been uploaded + fstest.CheckListing(t, f, []fstest.Item{file}) + + // Now delete the description metadata + uloztoObj := obj.(*Object) + err = uloztoObj.updateFileProperties(ctx, api.UpdateDescriptionRequest{ + Description: "", + }) + + require.NoError(t, err) + + // Listing the file should still succeed, although with estimated mtime and no hashes + fileWithoutDetails := fstest.Item{Path: subRemoteLeaf, Size: int64(filesize), ModTime: uloztoObj.remoteFsMtime, Hashes: map[hash.Type]string{ + hash.SHA256: "", + hash.MD5: "", + }} + fstest.CheckListing(t, f, []fstest.Item{fileWithoutDetails}) + + mtime := time.UnixMilli(987654321) + + // When we update the mtime it should be reflected but hashes should stay intact + require.NoError(t, obj.SetModTime(ctx, mtime)) + updatedMtimeFile := fstest.Item{Path: subRemoteLeaf, Size: int64(filesize), ModTime: mtime, Hashes: map[hash.Type]string{ + hash.SHA256: "", + hash.MD5: "", + }} + fstest.CheckListing(t, f, []fstest.Item{updatedMtimeFile}) + + // Tear down + require.NoError(t, operations.Purge(ctx, f, "")) +} + +// TestUploadLargeFile verifies that files over the supported threshold are not uploaded. 
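+// The threshold is a temporary workaround for an upstream bug and should go away with chunked upload support.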
+func TestUploadLargeFile(t *testing.T) {
+	const (
+		remoteName = "TestUlozto:"
+		payload = "foobar"
+		filesize = maxFileSizeBytes + 1
+	)
+	ctx := context.Background()
+	fstest.Initialise()
+	subRemoteName, subRemoteLeaf, err := fstest.RandomRemoteName(remoteName)
+	require.NoError(t, err)
+	f, err := fs.NewFs(ctx, subRemoteName)
+	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
+		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
+		return
+	}
+	require.NoError(t, err)
+
+	file := fstest.Item{ModTime: time.UnixMilli(123456789), Path: subRemoteLeaf, Size: int64(filesize)}
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	// The payload buffer is just a placeholder which shouldn't be used
+	_, err = f.Put(ctx, bytes.NewBufferString(payload), obji)
+
+	require.Error(t, err, "File size over the supported max threshold.")
+
+	// Verify the remote stayed intact
+	fstest.CheckListing(t, f, []fstest.Item{})
+}
diff --git a/bin/make_manual.py b/bin/make_manual.py
index 16fc12288..2ebeb207f 100755
--- a/bin/make_manual.py
+++ b/bin/make_manual.py
@@ -79,6 +79,7 @@ docs = [
     "storj.md",
     "sugarsync.md",
     "tardigrade.md", # stub only to redirect to storj.md
+    "ulozto.md",
     "uptobox.md",
     "union.md",
     "webdav.md",
diff --git a/docs/content/_index.md b/docs/content/_index.md
index 5068425b3..0d2b1afd2 100644
--- a/docs/content/_index.md
+++ b/docs/content/_index.md
@@ -180,6 +180,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
 {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
 {{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
+{{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}}
 {{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
 {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
 {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
diff --git a/docs/content/docs.md b/docs/content/docs.md
index 02735e21f..672de81f8 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -77,6 +77,7 @@ See the following for detailed instructions for
   * [Storj](/storj/)
   * [SugarSync](/sugarsync/)
   * [Union](/union/)
+  * [Uloz.to](/ulozto/)
   * [Uptobox](/uptobox/)
   * [WebDAV](/webdav/)
   * [Yandex Disk](/yandex/)
diff --git a/docs/content/overview.md b/docs/content/overview.md
index 7624977e2..1a4a5fe4c 100644
--- a/docs/content/overview.md
+++ b/docs/content/overview.md
@@ -57,6 +57,7 @@ Here is an overview of the major features of each cloud storage system.
 | SMB | - | R/W | Yes | No | - | - |
 | SugarSync | - | - | No | No | - | - |
 | Storj | - | R | No | No | - | - |
+| Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - |
 | Uptobox | - | - | No | Yes | - | - |
 | WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
 | Yandex Disk | MD5 | R/W | No | No | R | - |
@@ -100,6 +101,9 @@ hash](https://static.hidrive.com/dev/0001).
 It combines SHA1 sums for each 4 KiB block hierarchically to a single
 top-level sum.
 
+¹³ Uloz.to provides a server-calculated MD5 hash upon file upload. MD5 and
+SHA256 hashes are client-calculated and stored as metadata fields.
+
 ### Hash ###
 
 The cloud storage system supports various hash types of the objects.
@@ -523,6 +527,7 @@ upon backend-specific capabilities.
| SMB | No | No | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
 | SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
 | Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No |
+| Uloz.to | No | No | No | No | No | No | No | No | No | No | Yes |
 | Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No |
 | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |
 | Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
diff --git a/docs/content/ulozto.md b/docs/content/ulozto.md
new file mode 100644
index 000000000..1ad683698
--- /dev/null
+++ b/docs/content/ulozto.md
@@ -0,0 +1,247 @@
+---
+title: "Uloz.to"
+description: "Rclone docs for Uloz.to"
+versionIntroduced: "v1.66"
+---
+
+# {{< icon "fa fa-box-archive" >}} Uloz.to
+
+Paths are specified as `remote:path`
+
+Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
+
+The initial setup for Uloz.to involves filling in the user credentials.
+`rclone config` walks you through it.
+
+## Configuration
+
+Here is an example of how to make a remote called `remote`. First run:
+
+    rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / Uloz.to
+   \ "ulozto"
+[snip]
+Storage> ulozto
+
+Option app_token.
+The application token identifying the app. An app API key can be either found in the API doc
+https://uloz.to/upload-resumable-api-beta or obtained from customer service.
+Enter a value. Press Enter to leave empty.
+app_token> token_value
+
+Option username.
+The username of the principal to operate as.
+Enter a value. Press Enter to leave empty.
+username> user
+
+Option password.
+The password for the user.
+Enter a value. Press Enter to leave empty.
+password> secretPassword
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> n
+
+Keep this "remote" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+Once configured you can then use `rclone` like this,
+
+List folders in root level folder:
+
+    rclone lsd remote:
+
+List all the files in your root folder:
+
+    rclone ls remote:
+
+To copy a local folder to a Uloz.to folder called backup:
+
+    rclone copy /home/source remote:backup
+
+### User credentials
+
+The only reliable method is to authenticate the user using
+username and password. Uloz.to offers an API key as well, but
+it's reserved for the use of Uloz.to's in-house application
+and using it in different circumstances is unreliable.
+
+### Modification times and hashes
+
+Uloz.to doesn't allow the user to set a custom modification time,
+or retrieve the hashes after upload. As a result, the integration
+uses a free form field the API provides to encode client-provided
+timestamps and hashes. Timestamps are stored with microsecond
+precision.
+
+A server-calculated MD5 hash of the file is verified upon upload.
+Afterwards, the backend only serves the client-side calculated
+hashes.
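+
+For illustration, the stored value is a gob-serialized struct, base64-encoded
+and prefixed with a format version. Below is a minimal sketch of the scheme;
+the field names mirror the backend's `DescriptionEncodedMetadata` type and the
+sample mtime value is arbitrary:
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"fmt"
+)
+
+// metadata mirrors what rclone stores in the Uloz.to description field.
+type metadata struct {
+	Md5Hash            []byte
+	Sha256Hash         []byte
+	ModTimeEpochMicros int64
+}
+
+func main() {
+	m := metadata{ModTimeEpochMicros: 1700000000000000}
+	var buf bytes.Buffer
+	if err := gob.NewEncoder(&buf).Encode(m); err != nil {
+		panic(err)
+	}
+	// "1;" is the version prefix checked when the metadata is decoded.
+	fmt.Println("1;" + base64.StdEncoding.EncodeToString(buf.Bytes()))
+}
+```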
+
+### Restricted filename characters
+
+In addition to the [default restricted characters set](/overview/#restricted-characters)
+the following characters are also replaced:
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| \ | 0x5C | ＼ |
+
+Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
+as they can't be used in JSON strings.
+
+### Transfers
+
+All files are currently uploaded using a single HTTP request, so
+for uploading large files a stable connection is necessary. Rclone will
+upload up to `--transfers` files at the same time.
+
+### Deleting files
+
+By default, files are moved to the recycle bin whereas folders
+are deleted immediately. Trashed files are permanently deleted after
+30 days in the recycle bin.
+
+Emptying the trash is currently not implemented in rclone.
+
+### Root folder ID
+
+You can set the `root_folder_slug` for rclone. This is the folder
+(identified by its `Folder slug`) that rclone considers to be the root
+of your Uloz.to drive.
+
+Normally you will leave this blank and rclone will determine the
+correct root to use itself. However you can set this to restrict rclone
+to a specific folder hierarchy.
+
+In order to do this you will have to find the `Folder slug` of the
+folder you wish to use as root. This will be the last segment
+of the URL when you open the relevant folder in the Uloz.to web
+interface.
+
+For example, for exploring a folder with URL
+`https://uloz.to/fm/my-files/foobar`, `foobar` should be used as the
+root slug.
+
+`root_folder_slug` can be used alongside a specific path in the remote
+path. For example, if your remote's `root_folder_slug` corresponds to `/foo/bar`,
+`remote:baz/qux` will refer to `ABSOLUTE_ULOZTO_ROOT/foo/bar/baz/qux`.
+
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/ulozto/ulozto.go then run make backenddocs" >}}
+### Standard options
+
+Here are the Standard options specific to ulozto (Uloz.to).
+
+#### --ulozto-app-token
+
+The application token identifying the app. An app API key can be either found in the API doc https://uloz.to/upload-resumable-api-beta or obtained from customer service.
+
+Properties:
+
+- Config: app_token
+- Env Var: RCLONE_ULOZTO_APP_TOKEN
+- Type: string
+- Required: false
+
+#### --ulozto-username
+
+The username of the principal to operate as.
+
+Properties:
+
+- Config: username
+- Env Var: RCLONE_ULOZTO_USERNAME
+- Type: string
+- Required: false
+
+#### --ulozto-password
+
+The password for the user.
+
+Properties:
+
+- Config: password
+- Env Var: RCLONE_ULOZTO_PASSWORD
+- Type: string
+- Required: false
+
+### Advanced options
+
+Here are the Advanced options specific to ulozto (Uloz.to).
+
+#### --ulozto-root-folder-slug
+
+If set, rclone will use this folder as the root folder for all operations. For example, if the slug identifies 'foo/bar/', 'ulozto:baz' is equivalent to 'ulozto:foo/bar/baz' without any root slug set.
+
+Properties:
+
+- Config: root_folder_slug
+- Env Var: RCLONE_ULOZTO_ROOT_FOLDER_SLUG
+- Type: string
+- Required: false
+
+#### --ulozto-list-page-size
+
+The size of a single page for list commands. 1-500
+
+Properties:
+
+- Config: list_page_size
+- Env Var: RCLONE_ULOZTO_LIST_PAGE_SIZE
+- Type: int
+- Default: 500
+
+#### --ulozto-encoding
+
+The encoding for the backend.
+
+See the [encoding section in the overview](/overview/#encoding) for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_ULOZTO_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+{{< rem autogenerated options stop >}}
+
+## Limitations
+
+Uloz.to file names can't have the `\` character in them. rclone maps this
+to and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
+Reverse Solidus).
+
+Uloz.to only supports filenames up to 255 characters in length.
+
+Uloz.to rate limits access to the API, but exact details are undisclosed.
+Practical testing reveals that hitting the rate limit during normal use
+is very rare, although not impossible with a higher number of concurrently
+uploaded files.
+
+`rclone about` is not supported by the Uloz.to backend. Although
+there's an endpoint to retrieve the information for the UI, it's not
+exposed in the API. Backends without this capability cannot determine
+free space for an rclone mount or use policy `mfs` (most free space)
+as a member of an rclone union remote.
+
+See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
\ No newline at end of file
diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html
index 9094f94ee..629bf0e57 100644
--- a/docs/layouts/chrome/navbar.html
+++ b/docs/layouts/chrome/navbar.html
@@ -100,6 +100,7 @@
 SMB / CIFS
 Storj
 SugarSync
+Uloz.to
 Uptobox
 Union (merge backends)
 WebDAV
diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml
index 0ddf6ac90..35640d2e6 100644
--- a/fstest/test_all/config.yaml
+++ b/fstest/test_all/config.yaml
@@ -458,3 +458,6 @@ backends:
   - backend: "quatrix"
     remote: "TestQuatrix:"
     fastlist: false
+  - backend: "ulozto"
+    remote: "TestUlozto:"
+    fastlist: false