onedrive: Removed upload cutoff and always do session uploads

Set modtime on copy


Added versioning issue to OneDrive documentation
Oliver Heyme 2018-03-15 08:06:17 +01:00 committed by Nick Craig-Wood
parent 078d705dbe
commit 7f744033d8
3 changed files with 64 additions and 50 deletions


@@ -177,6 +177,11 @@ type SetFileSystemInfo struct {
FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
}
// CreateUploadRequest is used by CreateUploadSession to set the dates correctly
type CreateUploadRequest struct {
Item SetFileSystemInfo `json:"item"`
}
// CreateUploadResponse is the response from creating an upload session
type CreateUploadResponse struct {
UploadURL string `json:"uploadUrl"` // "https://sn3302.up.1drv.com/up/fe6987415ace7X4e1eF866337",
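For reference (a minimal sketch, not part of the commit): the new `CreateUploadRequest` exists so that the upload session itself carries the desired modification time. The snippet below shows the JSON body it is meant to produce, with the types reduced to the single field the diff adds and plain `time.Time` standing in for `api.Timestamp`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Simplified stand-ins for the api package types shown in the diff.
type fileSystemInfoFacet struct {
	LastModifiedDateTime time.Time `json:"lastModifiedDateTime"`
}
type setFileSystemInfo struct {
	FileSystemInfo fileSystemInfoFacet `json:"fileSystemInfo"`
}
type createUploadRequest struct {
	Item setFileSystemInfo `json:"item"`
}

func main() {
	req := createUploadRequest{}
	req.Item.FileSystemInfo.LastModifiedDateTime = time.Date(2018, 3, 15, 8, 6, 17, 0, time.UTC)
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
	// {
	//   "item": {
	//     "fileSystemInfo": {
	//       "lastModifiedDateTime": "2018-03-15T08:06:17Z"
	//     }
	//   }
	// }
}
```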


@@ -73,8 +73,7 @@ var (
}
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
uploadCutoff = fs.SizeSuffix(10 * 1024 * 1024)
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
)
// Register with Fs
@@ -228,7 +227,6 @@ func init() {
})
flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
flags.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
}
// Fs represents a remote one drive
@@ -842,6 +840,15 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
// Copy does NOT copy the modTime from the source and there seems to
// be no way to set date before
// This will create TWO versions on OneDrive
err = dstObj.SetModTime(srcObj.ModTime())
if err != nil {
return nil, err
}
return dstObj, nil
}
@@ -1088,21 +1095,23 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err error) {
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
}
createRequest := api.CreateUploadRequest{}
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
return shouldRetry(resp, err)
})
return
return response, err
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (err error) {
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, info *api.Item) (err error) {
opts := rest.Opts{
Method: "PUT",
RootURL: url,
@@ -1110,12 +1119,21 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
Body: chunk,
}
var response api.UploadFragmentResponse
// var response api.UploadFragmentResponse
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
_, _ = chunk.Seek(0, 0)
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
resp, err = o.fs.srv.Call(&opts)
retry, err := shouldRetry(resp, err)
if !retry && resp != nil {
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
// read the item
defer fs.CheckClose(resp.Body, &err)
return false, json.NewDecoder(resp.Body).Decode(info)
}
}
return retry, err
})
return err
}
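As an aside (a self-contained sketch, not rclone code): the switch from `CallJSON` to `Call` above reflects how OneDrive upload sessions respond. Intermediate fragments are acknowledged with 202 Accepted and no useful body, while the final fragment returns 200 or 201 together with the item metadata, which is why the body is only decoded into `info` on those status codes. The `item` type below is a stand-in for `api.Item`.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// item stands in for api.Item with just the fields needed here.
type item struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// decodeIfDone decodes the uploaded item only when the fragment completed the
// session (200 or 201); intermediate fragments (202) carry no item body.
func decodeIfDone(resp *http.Response, info *item) (done bool, err error) {
	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated {
		defer resp.Body.Close()
		return true, json.NewDecoder(resp.Body).Decode(info)
	}
	return false, nil
}

func main() {
	// Fake "final fragment" response for demonstration only.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
		fmt.Fprint(w, `{"name":"hello.txt","size":42}`)
	}))
	defer srv.Close()

	resp, _ := http.Get(srv.URL)
	var info item
	done, err := decodeIfDone(resp, &info)
	fmt.Println(done, err, info.Name, info.Size) // true <nil> hello.txt 42
}
```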
@@ -1136,14 +1154,14 @@ func (o *Object) cancelUploadSession(url string) (err error) {
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time, info *api.Item) (err error) {
if chunkSize%(320*1024) != 0 {
return errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize)
}
// Create upload session
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession()
session, err := o.createUploadSession(modTime)
if err != nil {
return err
}
@@ -1170,7 +1188,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
err = o.uploadFragment(uploadURL, position, size, seg, n)
err = o.uploadFragment(uploadURL, position, size, seg, n, info)
if err != nil {
return err
}
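A short self-contained sketch of the fragment arithmetic uploadMultipart relies on (the 25 MiB file size is purely illustrative): the chunk size has to be a multiple of 320 KiB, fragments are sent with the Content-Range header shown above, and only the final fragment may be shorter.

```go
package main

import "fmt"

func main() {
	const unit = 320 * 1024              // OneDrive fragment granularity
	chunkSize := int64(10 * 1024 * 1024) // backend default: 10 MiB = 32 × 320 KiB
	fmt.Println("chunk size ok:", chunkSize%unit == 0)

	size := int64(25 * 1024 * 1024) // illustrative file size
	for position := int64(0); position < size; position += chunkSize {
		n := chunkSize
		if remaining := size - position; remaining < n {
			n = remaining // the last fragment may be shorter
		}
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", position, position+n-1, size)
	}
}
```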
@@ -1191,37 +1209,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
size := src.Size()
modTime := src.ModTime()
var info *api.Item
if size <= int64(uploadCutoff) {
// This is for less than 100 MB of content
var resp *http.Response
opts := rest.Opts{
Method: "PUT",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
}
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which onedrive requires for all files.
if size == 0 {
opts.Body = nil
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
err = o.setMetaData(info)
} else {
err = o.uploadMultipart(in, size)
}
if err != nil {
return err
}
// Set the mod time now and read metadata
info, err = o.setModTime(modTime)
info := &api.Item{}
err = o.uploadMultipart(in, size, modTime, info)
if err != nil {
return err
}


@@ -153,11 +153,6 @@ system.
Above this size files will be chunked - must be multiple of 320k. The
default is 10MB. Note that the chunks will be buffered into memory.
#### --onedrive-upload-cutoff=SIZE ####
Cutoff for switching to chunked upload - must be <= 100MB. The default
is 10MB.
### Limitations ###
Note that OneDrive is case insensitive so you can't have a
@@ -170,3 +165,28 @@ identical looking unicode equivalent. For example if a file has a `?`
in it will be mapped to `？` instead.
The largest allowed file size is 10GiB (10,737,418,240 bytes).
### Versioning issue ###
Every change in OneDrive causes the service to create a new version.
This counts against a user's quota.
For example, changing the modification time of a file creates a second
version, so the file uses twice the space.
The `copy` command is the only rclone command affected by this, as we copy
the file and then set the modification time to match the source file.
User [Weropol](https://github.com/Weropol) has found a method to disable
versioning on OneDrive:
1. Open the settings menu by clicking on the gear symbol at the top of the OneDrive Business page.
2. Click Site settings.
3. Once on the Site settings page, navigate to Site Administration > Site libraries and lists.
4. Click Customize "Documents".
5. Click General Settings > Versioning Settings.
6. Under Document Version History select the option No versioning.
Note: This will disable the creation of new file versions, but will not remove any previous versions. Your documents are safe.
7. Apply the changes by clicking OK.
8. Use rclone to upload or modify files. (I also use the `--no-update-modtime` flag)
9. Restore the versioning settings after using rclone. (Optional)