hubic: remove backend as service has now shut down - fixes #6481
This commit is contained in:
parent b437d9461a
commit 4e79de106a

@@ -45,7 +45,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
-* Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)

@@ -24,7 +24,6 @@ import (
     _ "github.com/rclone/rclone/backend/hdfs"
     _ "github.com/rclone/rclone/backend/hidrive"
     _ "github.com/rclone/rclone/backend/http"
-    _ "github.com/rclone/rclone/backend/hubic"
     _ "github.com/rclone/rclone/backend/internetarchive"
     _ "github.com/rclone/rclone/backend/jottacloud"
     _ "github.com/rclone/rclone/backend/koofr"

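For context on why dropping this blank import removes the backend: every rclone backend registers itself with the core in an init() function (the deleted hubic.go later in this diff shows the real call), and backend/all exists only to pull those init() functions into the build. A minimal sketch of the pattern, using a purely hypothetical `mybackend` package name:

```
// Package mybackend sketches the registration pattern rclone backends use.
// The package name and the NewFs body are illustrative, not real rclone code.
package mybackend

import (
    "context"
    "errors"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
)

func init() {
    // This runs when backend/all blank-imports the package, which is what
    // makes the backend selectable in `rclone config`.
    fs.Register(&fs.RegInfo{
        Name:        "mybackend",
        Description: "Hypothetical example backend",
        NewFs:       NewFs,
    })
}

// NewFs would build the backend from its configuration.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    return nil, errors.New("not implemented")
}
```

Removing the blank import and deleting the package is therefore enough to drop the `hubic` storage type without touching any other backend.
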
@@ -1,62 +0,0 @@
-package hubic
-
-import (
-    "context"
-    "net/http"
-    "time"
-
-    "github.com/ncw/swift/v2"
-    "github.com/rclone/rclone/fs"
-)
-
-// auth is an authenticator for swift
-type auth struct {
-    f *Fs
-}
-
-// newAuth creates a swift authenticator
-func newAuth(f *Fs) *auth {
-    return &auth{
-        f: f,
-    }
-}
-
-// Request constructs an http.Request for authentication
-//
-// returns nil for not needed
-func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
-    const retries = 10
-    for try := 1; try <= retries; try++ {
-        err = a.f.getCredentials(context.TODO())
-        if err == nil {
-            break
-        }
-        time.Sleep(100 * time.Millisecond)
-        fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
-    }
-    return nil, err
-}
-
-// Response parses the result of an http request
-func (a *auth) Response(ctx context.Context, resp *http.Response) error {
-    return nil
-}
-
-// The public storage URL - set Internal to true to read
-// internal/service net URL
-func (a *auth) StorageUrl(Internal bool) string { // nolint
-    return a.f.credentials.Endpoint
-}
-
-// The access token
-func (a *auth) Token() string {
-    return a.f.credentials.Token
-}
-
-// The CDN url if available
-func (a *auth) CdnUrl() string { // nolint
-    return ""
-}
-
-// Check the interfaces are satisfied
-var _ swift.Authenticator = (*auth)(nil)

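One detail worth noting in the deleted Request method above: it receives a ctx but calls getCredentials with context.TODO(), so cancelling the caller's context never stops the retry loop. A context-aware variant would look roughly like the sketch below (illustrative only; it was never part of the backend, which this commit removes):

```
// Request, sketched as a context-aware variant of the deleted method above:
// the caller's ctx is passed through and also ends the retry loop early.
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
    const retries = 10
    for try := 1; try <= retries; try++ {
        err = a.f.getCredentials(ctx)
        if err == nil {
            break
        }
        select {
        case <-ctx.Done():
            return nil, ctx.Err()
        case <-time.After(100 * time.Millisecond):
        }
        fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
    }
    return nil, err
}
```
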
@@ -1,200 +0,0 @@
-// Package hubic provides an interface to the Hubic object storage
-// system.
-package hubic
-
-// This uses the normal swift mechanism to update the credentials and
-// ignores the expires field returned by the Hubic API. This may need
-// to be revisited after some actual experience.
-
-import (
-    "context"
-    "encoding/json"
-    "errors"
-    "fmt"
-    "io/ioutil"
-    "net/http"
-    "strings"
-    "time"
-
-    swiftLib "github.com/ncw/swift/v2"
-    "github.com/rclone/rclone/backend/swift"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/config/configmap"
-    "github.com/rclone/rclone/fs/config/configstruct"
-    "github.com/rclone/rclone/fs/config/obscure"
-    "github.com/rclone/rclone/fs/fshttp"
-    "github.com/rclone/rclone/lib/oauthutil"
-    "golang.org/x/oauth2"
-)
-
-const (
-    rcloneClientID              = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
-    rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
-)
-
-// Globals
-var (
-    // Description of how to auth for this app
-    oauthConfig = &oauth2.Config{
-        Scopes: []string{
-            "credentials.r", // Read OpenStack credentials
-        },
-        Endpoint: oauth2.Endpoint{
-            AuthURL:  "https://api.hubic.com/oauth/auth/",
-            TokenURL: "https://api.hubic.com/oauth/token/",
-        },
-        ClientID:     rcloneClientID,
-        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-        RedirectURL:  oauthutil.RedirectLocalhostURL,
-    }
-)
-
-// Register with Fs
-func init() {
-    fs.Register(&fs.RegInfo{
-        Name:        "hubic",
-        Description: "Hubic",
-        NewFs:       NewFs,
-        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-            return oauthutil.ConfigOut("", &oauthutil.Options{
-                OAuth2Config: oauthConfig,
-            })
-        },
-        Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
-    })
-}
-
-// credentials is the JSON returned from the Hubic API to read the
-// OpenStack credentials
-type credentials struct {
-    Token    string `json:"token"`    // OpenStack token
-    Endpoint string `json:"endpoint"` // OpenStack endpoint
-    Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
-}
-
-// Fs represents a remote hubic
-type Fs struct {
-    fs.Fs                    // wrapped Fs
-    features    *fs.Features // optional features
-    client      *http.Client // client for oauth api
-    credentials credentials  // returned from the Hubic API
-    expires     time.Time    // time credentials expire
-}
-
-// Object describes a swift object
-type Object struct {
-    *swift.Object
-}
-
-// Return a string version
-func (o *Object) String() string {
-    if o == nil {
-        return "<nil>"
-    }
-    return o.Object.String()
-}
-
-// ------------------------------------------------------------
-
-// String converts this Fs to a string
-func (f *Fs) String() string {
-    if f.Fs == nil {
-        return "Hubic"
-    }
-    return fmt.Sprintf("Hubic %s", f.Fs.String())
-}
-
-// getCredentials reads the OpenStack Credentials using the Hubic API
-//
-// The credentials are read into the Fs
-func (f *Fs) getCredentials(ctx context.Context) (err error) {
-    req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
-    if err != nil {
-        return err
-    }
-    resp, err := f.client.Do(req)
-    if err != nil {
-        return err
-    }
-    defer fs.CheckClose(resp.Body, &err)
-    if resp.StatusCode < 200 || resp.StatusCode > 299 {
-        body, _ := ioutil.ReadAll(resp.Body)
-        bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
-        return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
-    }
-    decoder := json.NewDecoder(resp.Body)
-    var result credentials
-    err = decoder.Decode(&result)
-    if err != nil {
-        return err
-    }
-    // fs.Debugf(f, "Got credentials %+v", result)
-    if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
-        return errors.New("couldn't read token, result and expired from credentials")
-    }
-    f.credentials = result
-    expires, err := time.Parse(time.RFC3339, result.Expires)
-    if err != nil {
-        return err
-    }
-    f.expires = expires
-    fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
-    return nil
-}
-
-// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-    client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
-    if err != nil {
-        return nil, fmt.Errorf("failed to configure Hubic: %w", err)
-    }
-
-    f := &Fs{
-        client: client,
-    }
-
-    // Make the swift Connection
-    ci := fs.GetConfig(ctx)
-    c := &swiftLib.Connection{
-        Auth:           newAuth(f),
-        ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
-        Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
-        Transport:      fshttp.NewTransport(ctx),
-    }
-    err = c.Authenticate(ctx)
-    if err != nil {
-        return nil, fmt.Errorf("error authenticating swift connection: %w", err)
-    }
-
-    // Parse config into swift.Options struct
-    opt := new(swift.Options)
-    err = configstruct.Set(m, opt)
-    if err != nil {
-        return nil, err
-    }
-
-    // Make inner swift Fs from the connection
-    swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
-    if err != nil && err != fs.ErrorIsFile {
-        return nil, err
-    }
-    f.Fs = swiftFs
-    f.features = f.Fs.Features().Wrap(f)
-    return f, err
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
-    return f.features
-}
-
-// UnWrap returns the Fs that this Fs is wrapping
-func (f *Fs) UnWrap() fs.Fs {
-    return f.Fs
-}
-
-// Check the interfaces are satisfied
-var (
-    _ fs.Fs        = (*Fs)(nil)
-    _ fs.UnWrapper = (*Fs)(nil)
-)

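As the package comment above notes, the deleted backend ignored the expires field returned by the Hubic API and simply re-fetched credentials whenever swift asked it to authenticate. Had that ever been revisited, the parsed f.expires field already held what was needed; a small sketch of such a check, illustrative only and never part of the deleted code:

```
// credentialsValid reports whether the cached credentials can still be used,
// keeping a safety margin before the expiry time returned by the Hubic API.
// Illustrative helper only; the deleted backend never performed this check.
func (f *Fs) credentialsValid(margin time.Duration) bool {
    if f.credentials.Token == "" || f.credentials.Endpoint == "" {
        return false
    }
    return time.Until(f.expires) > margin
}
```
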
@@ -1,19 +0,0 @@
-// Test Hubic filesystem interface
-package hubic_test
-
-import (
-    "testing"
-
-    "github.com/rclone/rclone/backend/hubic"
-    "github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-    fstests.Run(t, &fstests.Opt{
-        RemoteName:          "TestHubic:",
-        NilObject:           (*hubic.Object)(nil),
-        SkipFsCheckWrap:     true,
-        SkipObjectCheckWrap: true,
-    })
-}

@@ -40,7 +40,7 @@ const (
     minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )
 
-// SharedOptions are shared between swift and hubic
+// SharedOptions are shared between swift and backends which depend on swift
 var SharedOptions = []fs.Option{{
     Name: "chunk_size",
     Help: `Above this size files will be chunked into a _segments container.

@@ -49,7 +49,6 @@ docs = [
     "hdfs.md",
     "hidrive.md",
     "http.md",
-    "hubic.md",
     "internetarchive.md",
     "jottacloud.md",
     "koofr.md",

@@ -235,8 +235,8 @@ applications won't work with their files on an rclone mount without
 |--vfs-cache-mode writes| or |--vfs-cache-mode full|.
 See the [VFS File Caching](#vfs-file-caching) section for more info.
 
-The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2,
-Hubic) do not support the concept of empty directories, so empty
+The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
+do not support the concept of empty directories, so empty
 directories will have a tendency to disappear once they fall out of
 the directory cache.
 

@@ -15,7 +15,6 @@ TestDropbox:
 TestFichier:
 TestFTP:
 TestGoogleCloudStorage:
-TestHubic:
 TestNetStorage:
 TestOneDrive:
 TestPcloud:

@@ -15,5 +15,3 @@ exec rclone --check-normalization=true --check-control=true --check-length=true
 TestSwift:testInfo \
 TestYandex:testInfo \
 TestFTP:testInfo
-
-# TestHubic:testInfo \

@@ -129,7 +129,6 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
 {{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
 {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
-{{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
 {{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
 {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
 {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}

@@ -225,7 +225,7 @@ If you intend to use the wrapped remote both directly for keeping
 unencrypted content, as well as through a crypt remote for encrypted
 content, it is recommended to point the crypt remote to a separate
 directory within the wrapped remote. If you use a bucket-based storage
-system (e.g. Swift, S3, Google Compute Storage, B2, Hubic) it is generally
+system (e.g. Swift, S3, Google Compute Storage, B2) it is generally
 advisable to wrap the crypt remote around a specific bucket (`s3:bucket`).
 If wrapping around the entire root of the storage (`s3:`), and use the
 optional file name encryption, rclone will encrypt the bucket name.

@@ -51,7 +51,6 @@ See the following for detailed instructions for
 * [HDFS](/hdfs/)
 * [HiDrive](/hidrive/)
 * [HTTP](/http/)
-* [Hubic](/hubic/)
 * [Internet Archive](/internetarchive/)
 * [Jottacloud](/jottacloud/)
 * [Koofr](/koofr/)

@@ -1955,7 +1954,7 @@ quickly using the least amount of memory.
 
 However, some remotes have a way of listing all files beneath a
 directory in one (or a small number) of transactions. These tend to
-be the bucket-based remotes (e.g. S3, B2, GCS, Swift, Hubic).
+be the bucket-based remotes (e.g. S3, B2, GCS, Swift).
 
 If you use the `--fast-list` flag then rclone will use this method for
 listing directories. This will have the following consequences for

@@ -1,238 +0,0 @@
----
-title: "Hubic"
-description: "Rclone docs for Hubic"
----
-
-# {{< icon "fa fa-space-shuttle" >}} Hubic
-
-Paths are specified as `remote:path`
-
-Paths are specified as `remote:container` (or `remote:` for the `lsd`
-command.) You may put subdirectories in too, e.g. `remote:container/path/to/dir`.
-
-## Configuration
-
-The initial setup for Hubic involves getting a token from Hubic which
-you need to do in your browser. `rclone config` walks you through it.
-
-Here is an example of how to make a remote called `remote`. First run:
-
-    rclone config
-
-This will guide you through an interactive setup process:
-
-```
-n) New remote
-s) Set configuration password
-n/s> n
-name> remote
-Type of storage to configure.
-Choose a number from below, or type in your own value
-[snip]
-XX / Hubic
-   \ "hubic"
-[snip]
-Storage> hubic
-Hubic Client Id - leave blank normally.
-client_id>
-Hubic Client Secret - leave blank normally.
-client_secret>
-Remote config
-Use auto config?
- * Say Y if not sure
- * Say N if you are working on a remote or headless machine
-y) Yes
-n) No
-y/n> y
-If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
-Log in and authorize rclone for access
-Waiting for code...
-Got code
---------------------
-[remote]
-client_id =
-client_secret =
-token = {"access_token":"XXXXXX"}
---------------------
-y) Yes this is OK
-e) Edit this remote
-d) Delete this remote
-y/e/d> y
-```
-
-See the [remote setup docs](/remote_setup/) for how to set it up on a
-machine with no Internet browser available.
-
-Note that rclone runs a webserver on your local machine to collect the
-token as returned from Hubic. This only runs from the moment it opens
-your browser to the moment you get back the verification code. This
-is on `http://127.0.0.1:53682/` and this it may require you to unblock
-it temporarily if you are running a host firewall.
-
-Once configured you can then use `rclone` like this,
-
-List containers in the top level of your Hubic
-
-    rclone lsd remote:
-
-List all the files in your Hubic
-
-    rclone ls remote:
-
-To copy a local directory to an Hubic directory called backup
-
-    rclone copy /home/source remote:backup
-
-If you want the directory to be visible in the official *Hubic
-browser*, you need to copy your files to the `default` directory
-
-    rclone copy /home/source remote:default/backup
-
-### --fast-list ###
-
-This remote supports `--fast-list` which allows you to use fewer
-transactions in exchange for more memory. See the [rclone
-docs](/docs/#fast-list) for more details.
-
-### Modified time ###
-
-The modified time is stored as metadata on the object as
-`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1
-ns.
-
-This is a de facto standard (used in the official python-swiftclient
-amongst others) for storing the modification time for an object.
-
-Note that Hubic wraps the Swift backend, so most of the properties of
-are the same.
-
-{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/hubic/hubic.go then run make backenddocs" >}}
-### Standard options
-
-Here are the Standard options specific to hubic (Hubic).
-
-#### --hubic-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_HUBIC_CLIENT_ID
-- Type: string
-- Required: false
-
-#### --hubic-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_HUBIC_CLIENT_SECRET
-- Type: string
-- Required: false
-
-### Advanced options
-
-Here are the Advanced options specific to hubic (Hubic).
-
-#### --hubic-token
-
-OAuth Access Token as a JSON blob.
-
-Properties:
-
-- Config: token
-- Env Var: RCLONE_HUBIC_TOKEN
-- Type: string
-- Required: false
-
-#### --hubic-auth-url
-
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_HUBIC_AUTH_URL
-- Type: string
-- Required: false
-
-#### --hubic-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_HUBIC_TOKEN_URL
-- Type: string
-- Required: false
-
-#### --hubic-chunk-size
-
-Above this size files will be chunked into a _segments container.
-
-Above this size files will be chunked into a _segments container. The
-default for this is 5 GiB which is its maximum value.
-
-Properties:
-
-- Config: chunk_size
-- Env Var: RCLONE_HUBIC_CHUNK_SIZE
-- Type: SizeSuffix
-- Default: 5Gi
-
-#### --hubic-no-chunk
-
-Don't chunk files during streaming upload.
-
-When doing streaming uploads (e.g. using rcat or mount) setting this
-flag will cause the swift backend to not upload chunked files.
-
-This will limit the maximum upload size to 5 GiB. However non chunked
-files are easier to deal with and have an MD5SUM.
-
-Rclone will still chunk files bigger than chunk_size when doing normal
-copy operations.
-
-Properties:
-
-- Config: no_chunk
-- Env Var: RCLONE_HUBIC_NO_CHUNK
-- Type: bool
-- Default: false
-
-#### --hubic-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_HUBIC_ENCODING
-- Type: MultiEncoder
-- Default: Slash,InvalidUtf8
-
-{{< rem autogenerated options stop >}}
-
-## Limitations
-
-This uses the normal OpenStack Swift mechanism to refresh the Swift
-API credentials and ignores the expires field returned by the Hubic
-API.
-
-The Swift API doesn't return a correct MD5SUM for segmented files
-(Dynamic or Static Large Objects) so rclone won't check or use the
-MD5SUM for these.

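For reference, the `X-Object-Meta-Mtime` convention described in the deleted docs above amounts to writing the modification time as seconds since the Unix epoch with nanosecond precision. A minimal sketch of producing such a value (illustrative only; not taken from rclone's source):

```
package main

import (
    "fmt"
    "time"
)

// mtimeMetaValue renders a time the way swift-style backends store it in
// X-Object-Meta-Mtime: whole seconds, a dot, then nine digits of nanoseconds.
func mtimeMetaValue(t time.Time) string {
    return fmt.Sprintf("%d.%09d", t.Unix(), t.Nanosecond())
}

func main() {
    // 2015-11-09T14:24:56.123456789+01:00 prints as "1447075496.123456789"
    ts := time.Date(2015, 11, 9, 14, 24, 56, 123456789, time.FixedZone("CET", 3600))
    fmt.Println(mtimeMetaValue(ts))
}
```
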
@@ -32,7 +32,6 @@ Here is an overview of the major features of each cloud storage system.
 | HDFS | - | R/W | No | No | - | - |
 | HiDrive | HiDrive ¹² | R/W | No | No | - | - |
 | HTTP | - | R | No | No | R | - |
-| Hubic | MD5 | R/W | No | No | R/W | - |
 | Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
 | Jottacloud | MD5 | R/W | Yes | No | R | - |
 | Koofr | MD5 | - | Yes | No | - | - |

@@ -484,7 +483,6 @@ upon backend-specific capabilities.
 | HDFS | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
 | HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | Yes |
 | HTTP | No | No | No | No | No | No | No | No | No | Yes |
-| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
 | Internet Archive | No | Yes | No | No | Yes | Yes | No | Yes | Yes | No |
 | Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
 | Koofr | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | Yes |

@@ -516,7 +514,7 @@ upon backend-specific capabilities.
 This deletes a directory quicker than just deleting all the files in
 the directory.
 
-† Note Swift, Hubic, and Storj implement this in order to delete
+† Note Swift and Storj implement this in order to delete
 directory markers but they don't actually have a quicker way of deleting
 files other than deleting them individually.
 

@@ -74,7 +74,6 @@
 <a class="dropdown-item" href="/hdfs/"><i class="fa fa-globe fa-fw"></i> HDFS (Hadoop Distributed Filesystem)</a>
 <a class="dropdown-item" href="/hidrive/"><i class="fa fa-cloud fa-fw"></i> HiDrive</a>
 <a class="dropdown-item" href="/http/"><i class="fa fa-globe fa-fw"></i> HTTP</a>
-<a class="dropdown-item" href="/hubic/"><i class="fa fa-space-shuttle fa-fw"></i> Hubic</a>
 <a class="dropdown-item" href="/internetarchive/"><i class="fa fa-archive fa-fw"></i> Internet Archive</a>
 <a class="dropdown-item" href="/jottacloud/"><i class="fa fa-cloud fa-fw"></i> Jottacloud</a>
 <a class="dropdown-item" href="/koofr/"><i class="fa fa-suitcase fa-fw"></i> Koofr</a>

@@ -136,9 +136,6 @@ backends:
   - backend: "hidrive"
     remote: "TestHiDrive:"
     fastlist: false
-  - backend: "hubic"
-    remote: "TestHubic:"
-    fastlist: false
   - backend: "internetarchive"
     remote: "TestIA:rclone-integration-test"
     fastlist: true