googlephotos: Backend for accessing Google Photos #369
This commit is contained in:
parent 5eebbaaac4
commit a1cfe61ffd
README.md

@@ -33,6 +33,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
+  * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
   * Hubic [:page_facing_up:](https://rclone.org/hubic/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
backend/all/all.go

@@ -14,6 +14,7 @@ import (
 	_ "github.com/ncw/rclone/backend/fichier"
 	_ "github.com/ncw/rclone/backend/ftp"
 	_ "github.com/ncw/rclone/backend/googlecloudstorage"
+	_ "github.com/ncw/rclone/backend/googlephotos"
 	_ "github.com/ncw/rclone/backend/http"
 	_ "github.com/ncw/rclone/backend/hubic"
 	_ "github.com/ncw/rclone/backend/jottacloud"
backend/googlephotos/albums.go (new file, 148 lines)

@@ -0,0 +1,148 @@
// This file contains the albums abstraction

package googlephotos

import (
    "path"
    "strings"
    "sync"

    "github.com/ncw/rclone/backend/googlephotos/api"
)

// All the albums
type albums struct {
    mu      sync.Mutex
    dupes   map[string][]*api.Album // duplicated names
    byID    map[string]*api.Album   // indexed by ID
    byTitle map[string]*api.Album   // indexed by Title
    path    map[string][]string     // partial album names to directory
}

// newAlbums creates a new empty albums container
func newAlbums() *albums {
    return &albums{
        dupes:   map[string][]*api.Album{},
        byID:    map[string]*api.Album{},
        byTitle: map[string]*api.Album{},
        path:    map[string][]string{},
    }
}

// add an album
func (as *albums) add(album *api.Album) {
    // Munge the name of the album into a sensible path name
    album.Title = path.Clean(album.Title)
    if album.Title == "." || album.Title == "/" {
        album.Title = addID("", album.ID)
    }

    as.mu.Lock()
    as._add(album)
    as.mu.Unlock()
}

// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
    // update dupes by title
    dupes := as.dupes[album.Title]
    dupes = append(dupes, album)
    as.dupes[album.Title] = dupes

    // Dedupe the album name if necessary
    if len(dupes) >= 2 {
        // If this is the first dupe, then need to adjust the first one
        if len(dupes) == 2 {
            firstAlbum := dupes[0]
            as._del(firstAlbum)
            as._add(firstAlbum)
            // undo add of firstAlbum to dupes
            as.dupes[album.Title] = dupes
        }
        album.Title = addID(album.Title, album.ID)
    }

    // Store the new album
    as.byID[album.ID] = album
    as.byTitle[album.Title] = album

    // Store the partial paths
    dir, leaf := album.Title, ""
    for dir != "" {
        i := strings.LastIndex(dir, "/")
        if i >= 0 {
            dir, leaf = dir[:i], dir[i+1:]
        } else {
            dir, leaf = "", dir
        }
        dirs := as.path[dir]
        found := false
        for _, dir := range dirs {
            if dir == leaf {
                found = true
            }
        }
        if !found {
            as.path[dir] = append(as.path[dir], leaf)
        }
    }
}

// del an album
func (as *albums) del(album *api.Album) {
    as.mu.Lock()
    as._del(album)
    as.mu.Unlock()
}

// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
    // We leave in dupes so it doesn't cause albums to get renamed

    // Remove from byID and byTitle
    delete(as.byID, album.ID)
    delete(as.byTitle, album.Title)

    // Remove from paths
    dir, leaf := album.Title, ""
    for dir != "" {
        // Can't delete if this dir exists anywhere in the path structure
        if _, found := as.path[dir]; found {
            break
        }
        i := strings.LastIndex(dir, "/")
        if i >= 0 {
            dir, leaf = dir[:i], dir[i+1:]
        } else {
            dir, leaf = "", dir
        }
        dirs := as.path[dir]
        for i, dir := range dirs {
            if dir == leaf {
                dirs = append(dirs[:i], dirs[i+1:]...)
                break
            }
        }
        if len(dirs) == 0 {
            delete(as.path, dir)
        } else {
            as.path[dir] = dirs
        }
    }
}

// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
    as.mu.Lock()
    defer as.mu.Unlock()
    album, ok = as.byTitle[title]
    return album, ok
}

// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
    as.mu.Lock()
    defer as.mu.Unlock()
    dirs, ok = as.path[albumPath]
    return dirs, ok
}
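The dedupe behaviour of _add is easiest to see with a concrete run. The sketch below is an illustration only, not part of the commit; it would have to live inside the googlephotos package, since newAlbums, add and get are unexported. It mirrors the TestAlbumsAdd case in the test file that follows.

package googlephotos

import (
    "fmt"

    "github.com/ncw/rclone/backend/googlephotos/api"
)

// ExampleAlbumsDedupe is a hypothetical example, not in the commit.
// Adding a second album with an already-seen title renames both
// entries by appending their IDs, so the byTitle keys stay unique.
func ExampleAlbumsDedupe() {
    a := newAlbums()
    a.add(&api.Album{Title: "two", ID: "2"})
    a.add(&api.Album{Title: "two", ID: "2a"}) // duplicate title

    _, plain := a.get("two")       // the bare title is gone
    _, first := a.get("two {2}")   // first album, renamed on dedupe
    _, second := a.get("two {2a}") // second album, renamed on add
    fmt.Println(plain, first, second)
    // Output: false true true
}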
backend/googlephotos/albums_test.go (new file, 311 lines)

@@ -0,0 +1,311 @@
package googlephotos

import (
    "testing"

    "github.com/ncw/rclone/backend/googlephotos/api"
    "github.com/stretchr/testify/assert"
)

func TestNewAlbums(t *testing.T) {
    albums := newAlbums()
    assert.NotNil(t, albums.dupes)
    assert.NotNil(t, albums.byID)
    assert.NotNil(t, albums.byTitle)
    assert.NotNil(t, albums.path)
}

func TestAlbumsAdd(t *testing.T) {
    albums := newAlbums()

    assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
    assert.Equal(t, map[string]*api.Album{}, albums.byID)
    assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
    assert.Equal(t, map[string][]string{}, albums.path)

    a1 := &api.Album{
        Title: "one",
        ID:    "1",
    }
    albums.add(a1)

    assert.Equal(t, map[string][]*api.Album{
        "one": []*api.Album{a1},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "1": a1,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one": a1,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "": []string{"one"},
    }, albums.path)

    a2 := &api.Album{
        Title: "two",
        ID:    "2",
    }
    albums.add(a2)

    assert.Equal(t, map[string][]*api.Album{
        "one": []*api.Album{a1},
        "two": []*api.Album{a2},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "1": a1,
        "2": a2,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one": a1,
        "two": a2,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "": []string{"one", "two"},
    }, albums.path)

    // Add a duplicate
    a2a := &api.Album{
        Title: "two",
        ID:    "2a",
    }
    albums.add(a2a)

    assert.Equal(t, map[string][]*api.Album{
        "one": []*api.Album{a1},
        "two": []*api.Album{a2, a2a},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "1":  a1,
        "2":  a2,
        "2a": a2a,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one":      a1,
        "two {2}":  a2,
        "two {2a}": a2a,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "": []string{"one", "two {2}", "two {2a}"},
    }, albums.path)

    // Add a sub directory
    a1sub := &api.Album{
        Title: "one/sub",
        ID:    "1sub",
    }
    albums.add(a1sub)

    assert.Equal(t, map[string][]*api.Album{
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "1":    a1,
        "2":    a2,
        "2a":   a2a,
        "1sub": a1sub,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one":      a1,
        "one/sub":  a1sub,
        "two {2}":  a2,
        "two {2a}": a2a,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "":    []string{"one", "two {2}", "two {2a}"},
        "one": []string{"sub"},
    }, albums.path)

    // Add a weird path
    a0 := &api.Album{
        Title: "/../././..////.",
        ID:    "0",
    }
    albums.add(a0)

    assert.Equal(t, map[string][]*api.Album{
        "{0}":     []*api.Album{a0},
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "0":    a0,
        "1":    a1,
        "2":    a2,
        "2a":   a2a,
        "1sub": a1sub,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "{0}":      a0,
        "one":      a1,
        "one/sub":  a1sub,
        "two {2}":  a2,
        "two {2a}": a2a,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "":    []string{"one", "two {2}", "two {2a}", "{0}"},
        "one": []string{"sub"},
    }, albums.path)
}

func TestAlbumsDel(t *testing.T) {
    albums := newAlbums()

    a1 := &api.Album{
        Title: "one",
        ID:    "1",
    }
    albums.add(a1)

    a2 := &api.Album{
        Title: "two",
        ID:    "2",
    }
    albums.add(a2)

    // Add a duplicate
    a2a := &api.Album{
        Title: "two",
        ID:    "2a",
    }
    albums.add(a2a)

    // Add a sub directory
    a1sub := &api.Album{
        Title: "one/sub",
        ID:    "1sub",
    }
    albums.add(a1sub)

    assert.Equal(t, map[string][]*api.Album{
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "1":    a1,
        "2":    a2,
        "2a":   a2a,
        "1sub": a1sub,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one":      a1,
        "one/sub":  a1sub,
        "two {2}":  a2,
        "two {2a}": a2a,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "":    []string{"one", "two {2}", "two {2a}"},
        "one": []string{"sub"},
    }, albums.path)

    albums.del(a1)

    assert.Equal(t, map[string][]*api.Album{
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "2":    a2,
        "2a":   a2a,
        "1sub": a1sub,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one/sub":  a1sub,
        "two {2}":  a2,
        "two {2a}": a2a,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "":    []string{"one", "two {2}", "two {2a}"},
        "one": []string{"sub"},
    }, albums.path)

    albums.del(a2)

    assert.Equal(t, map[string][]*api.Album{
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "2a":   a2a,
        "1sub": a1sub,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one/sub":  a1sub,
        "two {2a}": a2a,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "":    []string{"one", "two {2a}"},
        "one": []string{"sub"},
    }, albums.path)

    albums.del(a2a)

    assert.Equal(t, map[string][]*api.Album{
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{
        "1sub": a1sub,
    }, albums.byID)
    assert.Equal(t, map[string]*api.Album{
        "one/sub": a1sub,
    }, albums.byTitle)
    assert.Equal(t, map[string][]string{
        "":    []string{"one"},
        "one": []string{"sub"},
    }, albums.path)

    albums.del(a1sub)

    assert.Equal(t, map[string][]*api.Album{
        "one":     []*api.Album{a1},
        "two":     []*api.Album{a2, a2a},
        "one/sub": []*api.Album{a1sub},
    }, albums.dupes)
    assert.Equal(t, map[string]*api.Album{}, albums.byID)
    assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
    assert.Equal(t, map[string][]string{}, albums.path)
}

func TestAlbumsGet(t *testing.T) {
    albums := newAlbums()

    a1 := &api.Album{
        Title: "one",
        ID:    "1",
    }
    albums.add(a1)

    album, ok := albums.get("one")
    assert.Equal(t, true, ok)
    assert.Equal(t, a1, album)

    album, ok = albums.get("notfound")
    assert.Equal(t, false, ok)
    assert.Nil(t, album)
}

func TestAlbumsGetDirs(t *testing.T) {
    albums := newAlbums()

    a1 := &api.Album{
        Title: "one",
        ID:    "1",
    }
    albums.add(a1)

    dirs, ok := albums.getDirs("")
    assert.Equal(t, true, ok)
    assert.Equal(t, []string{"one"}, dirs)

    dirs, ok = albums.getDirs("notfound")
    assert.Equal(t, false, ok)
    assert.Nil(t, dirs)
}
backend/googlephotos/api/types.go (new file, 190 lines)

@@ -0,0 +1,190 @@
package api

import (
    "fmt"
    "time"
)

// ErrorDetails in the internals of the Error type
type ErrorDetails struct {
    Code    int    `json:"code"`
    Message string `json:"message"`
    Status  string `json:"status"`
}

// Error is returned on errors
type Error struct {
    Details ErrorDetails `json:"error"`
}

// Error satisfies the error interface
func (e *Error) Error() string {
    return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}

// Album of photos
type Album struct {
    ID                    string `json:"id,omitempty"`
    Title                 string `json:"title"`
    ProductURL            string `json:"productUrl,omitempty"`
    MediaItemsCount       string `json:"mediaItemsCount,omitempty"`
    CoverPhotoBaseURL     string `json:"coverPhotoBaseUrl,omitempty"`
    CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
    IsWriteable           bool   `json:"isWriteable,omitempty"`
}

// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
    Albums        []Album `json:"albums"`
    SharedAlbums  []Album `json:"sharedAlbums"`
    NextPageToken string  `json:"nextPageToken"`
}

// CreateAlbum is the request to create an Album
type CreateAlbum struct {
    Album *Album `json:"album"`
}

// MediaItem is a photo or video
type MediaItem struct {
    ID            string `json:"id"`
    ProductURL    string `json:"productUrl"`
    BaseURL       string `json:"baseUrl"`
    MimeType      string `json:"mimeType"`
    MediaMetadata struct {
        CreationTime time.Time `json:"creationTime"`
        Width        string    `json:"width"`
        Height       string    `json:"height"`
        Photo        struct {
        } `json:"photo"`
    } `json:"mediaMetadata"`
    Filename string `json:"filename"`
}

// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
    MediaItems    []MediaItem `json:"mediaItems"`
    NextPageToken string      `json:"nextPageToken"`
}

// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.

// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.

// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.

// Date is used as part of SearchFilter
type Date struct {
    Year  int `json:"year,omitempty"`
    Month int `json:"month,omitempty"`
    Day   int `json:"day,omitempty"`
}

// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
    Dates  []Date `json:"dates,omitempty"`
    Ranges []struct {
        StartDate Date `json:"startDate,omitempty"`
        EndDate   Date `json:"endDate,omitempty"`
    } `json:"ranges,omitempty"`
}

// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
    IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
    ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}

// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
    MediaTypes []string `json:"mediaTypes,omitempty"`
}

// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
    IncludedFeatures []string `json:"includedFeatures,omitempty"`
}

// Filters combines all the filter types for media item queries
type Filters struct {
    DateFilter               *DateFilter      `json:"dateFilter,omitempty"`
    ContentFilter            *ContentFilter   `json:"contentFilter,omitempty"`
    MediaTypeFilter          *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
    FeatureFilter            *FeatureFilter   `json:"featureFilter,omitempty"`
    IncludeArchivedMedia     *bool            `json:"includeArchivedMedia,omitempty"`
    ExcludeNonAppCreatedData *bool            `json:"excludeNonAppCreatedData,omitempty"`
}

// SearchFilter is used with mediaItems.search
type SearchFilter struct {
    AlbumID   string   `json:"albumId,omitempty"`
    PageSize  int      `json:"pageSize"`
    PageToken string   `json:"pageToken,omitempty"`
    Filters   *Filters `json:"filters,omitempty"`
}

// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
    UploadToken string `json:"uploadToken"`
}

// NewMediaItem is a single media item for upload
type NewMediaItem struct {
    Description     string          `json:"description"`
    SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}

// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
    AlbumID       string         `json:"albumId,omitempty"`
    NewMediaItems []NewMediaItem `json:"newMediaItems"`
}

// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
    NewMediaItemResults []struct {
        UploadToken string `json:"uploadToken"`
        Status      struct {
            Message string `json:"message"`
            Code    int    `json:"code"`
        } `json:"status"`
        MediaItem MediaItem `json:"mediaItem"`
    } `json:"newMediaItemResults"`
}

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
    MediaItemIds []string `json:"mediaItemIds"`
}
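These request types marshal directly into the JSON the Photos Library API expects; the omitempty tags drop unused optional fields. A minimal sketch (illustration only, not part of the commit) of what a SearchFilter serializes to:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/ncw/rclone/backend/googlephotos/api"
)

func main() {
    // Search the library for photos only; every optional field that is
    // left zero is omitted from the output by the omitempty tags above.
    filter := api.SearchFilter{
        PageSize: 100,
        Filters: &api.Filters{
            MediaTypeFilter: &api.MediaTypeFilter{
                MediaTypes: []string{"PHOTO"},
            },
        },
    }
    out, err := json.Marshal(&filter)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
    // {"pageSize":100,"filters":{"mediaTypeFilter":{"mediaTypes":["PHOTO"]}}}
}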
backend/googlephotos/googlephotos.go (new file, 962 lines)

@@ -0,0 +1,962 @@
// Package googlephotos provides an interface to Google Photos
package googlephotos

// FIXME Resumable uploads not implemented - rclone can't resume uploads in general

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    golog "log"
    "net/http"
    "net/url"
    "path"
    "regexp"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/backend/googlephotos/api"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/dirtree"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/fs/log"
    "github.com/ncw/rclone/lib/oauthutil"
    "github.com/ncw/rclone/lib/pacer"
    "github.com/ncw/rclone/lib/rest"
    "github.com/pkg/errors"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)

var (
    errCantUpload  = errors.New("can't upload files here")
    errCantMkdir   = errors.New("can't make directories here")
    errCantRmdir   = errors.New("can't remove this directory")
    errAlbumDelete = errors.New("google photos API does not implement deleting albums")
    errRemove      = errors.New("google photos API only implements removing files from albums")
    errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
)

const (
    rcloneClientID              = "202264815644-rt1o1c9evjaotbpbab10m83i8cnjk077.apps.googleusercontent.com"
    rcloneEncryptedClientSecret = "kLJLretPefBgrDHosdml_nlF64HZ9mUcO85X5rdjYBPP8ChA-jr3Ow"
    rootURL                     = "https://photoslibrary.googleapis.com/v1"
    listChunks                  = 100 // chunk size to read directory listings
    albumChunks                 = 50  // chunk size to read album listings
    minSleep                    = 10 * time.Millisecond
    scopeReadOnly               = "https://www.googleapis.com/auth/photoslibrary.readonly"
    scopeReadWrite              = "https://www.googleapis.com/auth/photoslibrary"
)

var (
    // Description of how to auth for this app
    oauthConfig = &oauth2.Config{
        Scopes: []string{
            scopeReadWrite,
        },
        Endpoint:     google.Endpoint,
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.TitleBarRedirectURL,
    }
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "google photos",
        Prefix:      "gphotos",
        Description: "Google Photos",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            // Parse config into Options struct
            opt := new(Options)
            err := configstruct.Set(m, opt)
            if err != nil {
                fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
                return
            }

            // Fill in the scopes
            if opt.ReadOnly {
                oauthConfig.Scopes[0] = scopeReadOnly
            } else {
                oauthConfig.Scopes[0] = scopeReadWrite
            }

            // Do the oauth
            err = oauthutil.Config("google photos", name, m, oauthConfig)
            if err != nil {
                golog.Fatalf("Failed to configure token: %v", err)
            }

            // Warn the user
            fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.

`)
        },
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Google Application Client Id\nLeave blank normally.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Google Application Client Secret\nLeave blank normally.",
        }, {
            Name:    "read_only",
            Default: false,
            Help: `Set to make the Google Photos backend read only.

If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.`,
        }, {
            Name:    "read_size",
            Default: false,
            Help: `Set to read the size of media items.

Normally rclone does not read the size of media items since this takes
another transaction. This isn't necessary for syncing. However
rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
            Advanced: true,
        }},
    })
}
// Options defines the configuration for this backend
type Options struct {
    ReadOnly bool `config:"read_only"`
    ReadSize bool `config:"read_size"`
}

// Fs represents a remote storage server
type Fs struct {
    name       string           // name of this remote
    root       string           // the path we are working on if any
    opt        Options          // parsed options
    features   *fs.Features     // optional features
    srv        *rest.Client     // the connection to the Google Photos server
    pacer      *fs.Pacer        // To pace the API calls
    startTime  time.Time        // time Fs was started - used for datestamps
    albums     map[bool]*albums // albums, shared or not
    uploadedMu sync.Mutex       // to protect the below
    uploaded   dirtree.DirTree  // record of uploaded items
}

// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
    fs       *Fs       // what this object is part of
    remote   string    // The remote path
    url      string    // download path
    id       string    // ID of this object
    bytes    int64     // Bytes in the object
    modTime  time.Time // Modified time of the object
    mimeType string
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("Google Photos path %q", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// dirTime returns the time to set a directory to
func (f *Fs) dirTime() time.Time {
    return f.startTime
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
    body, err := rest.ReadBody(resp)
    if err != nil {
        body = nil
    }
    var e = api.Error{
        Details: api.ErrorDetails{
            Code:    resp.StatusCode,
            Message: string(body),
            Status:  resp.Status,
        },
    }
    if body != nil {
        _ = json.Unmarshal(body, &e)
    }
    return &e
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure google photos")
    }

    root = strings.Trim(path.Clean(root), "/")
    if root == "." || root == "/" {
        root = ""
    }
    f := &Fs{
        name:      name,
        root:      root,
        opt:       *opt,
        srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
        pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
        startTime: time.Now(),
        albums:    map[bool]*albums{},
        uploaded:  dirtree.New(),
    }
    f.features = (&fs.Features{
        ReadMimeType: true,
    }).Fill(f)
    f.srv.SetErrorHandler(errorHandler)

    _, _, pattern := patterns.match(f.root, "", true)
    if pattern != nil && pattern.isFile {
        oldRoot := f.root
        var leaf string
        f.root, leaf = path.Split(f.root)
        f.root = strings.TrimRight(f.root, "/")
        _, err := f.NewObject(context.TODO(), leaf)
        if err == nil {
            return f, fs.ErrorIsFile
        }
        f.root = oldRoot
    }
    return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.MediaItem) (fs.Object, error) {
    o := &Object{
        fs:     f,
        remote: remote,
    }
    if info != nil {
        o.setMetaData(info)
    } else {
        err := o.readMetaData(ctx) // reads info and meta, returning an error
        if err != nil {
            return nil, err
        }
    }
    return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    defer log.Trace(f, "remote=%q", remote)("")
    return f.newObjectWithInfo(ctx, remote, nil)
}

// addID adds the ID to name
func addID(name string, ID string) string {
    idStr := "{" + ID + "}"
    if name == "" {
        return idStr
    }
    return name + " " + idStr
}

// addFileID adds the ID to the fileName passed in
func addFileID(fileName string, ID string) string {
    ext := path.Ext(fileName)
    base := fileName[:len(fileName)-len(ext)]
    return addID(base, ID) + ext
}

var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`)

// findID finds an ID in string if one is there or ""
func findID(name string) string {
    match := idRe.FindStringSubmatch(name)
    if match == nil {
        return ""
    }
    return match[1]
}
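Together these helpers implement the "name {ID}" convention that keeps duplicate filenames distinct in listings. A quick sketch of the round trip (illustration only, not part of the commit; the ID value is made up, but is 55 characters long so that idRe matches it):

package googlephotos

import "fmt"

// ExampleFileID is a hypothetical example, not in the commit.
func ExampleFileID() {
    id := "AbCdEfGhIjKlMnOpQrStUvWxYz0123456789AbCdEfGhIjKlMnOpQrS"
    name := addFileID("IMG_1234.jpg", id)
    fmt.Println(name)
    fmt.Println(findID(name) == id)
    // Output:
    // IMG_1234 {AbCdEfGhIjKlMnOpQrStUvWxYz0123456789AbCdEfGhIjKlMnOpQrS}.jpg
    // true
}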
// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
    all, ok := f.albums[shared]
    if ok && all != nil {
        return all, nil
    }
    opts := rest.Opts{
        Method:     "GET",
        Path:       "/albums",
        Parameters: url.Values{},
    }
    if shared {
        opts.Path = "/sharedAlbums"
    }
    all = newAlbums()
    opts.Parameters.Set("pageSize", strconv.Itoa(albumChunks))
    lastID := ""
    for {
        var result api.ListAlbums
        var resp *http.Response
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(&opts, nil, &result)
            return shouldRetry(resp, err)
        })
        if err != nil {
            return nil, errors.Wrap(err, "couldn't list albums")
        }
        newAlbums := result.Albums
        if shared {
            newAlbums = result.SharedAlbums
        }
        if len(newAlbums) > 0 && newAlbums[0].ID == lastID {
            // skip first if ID duplicated from last page
            newAlbums = newAlbums[1:]
        }
        if len(newAlbums) > 0 {
            lastID = newAlbums[len(newAlbums)-1].ID
        }
        for i := range newAlbums {
            all.add(&newAlbums[i])
        }
        if result.NextPageToken == "" {
            break
        }
        opts.Parameters.Set("pageToken", result.NextPageToken)
    }
    f.albums[shared] = all
    return all, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *api.MediaItem, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/mediaItems:search",
    }
    filter.PageSize = listChunks
    filter.PageToken = ""
    lastID := ""
    for {
        var result api.MediaItems
        var resp *http.Response
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(&opts, &filter, &result)
            return shouldRetry(resp, err)
        })
        if err != nil {
            return errors.Wrap(err, "couldn't list files")
        }
        items := result.MediaItems
        if len(items) > 0 && items[0].ID == lastID {
            // skip first if ID duplicated from last page
            items = items[1:]
        }
        if len(items) > 0 {
            lastID = items[len(items)-1].ID
        }
        for i := range items {
            item := &result.MediaItems[i]
            remote := item.Filename
            remote = strings.Replace(remote, "/", "／", -1) // replace "/" with "／" (FULLWIDTH SOLIDUS)
            err = fn(remote, item, false)
            if err != nil {
                return err
            }
        }
        if result.NextPageToken == "" {
            break
        }
        filter.PageToken = result.NextPageToken
    }

    return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaItem, isDirectory bool) (fs.DirEntry, error) {
    if isDirectory {
        d := fs.NewDir(remote, f.dirTime())
        return d, nil
    }
    o := &Object{
        fs:     f,
        remote: remote,
    }
    o.setMetaData(item)
    return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
    // List the objects
    err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
        entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
        if err != nil {
            return err
        }
        if entry != nil {
            entries = append(entries, entry)
        }
        return nil
    })
    if err != nil {
        return nil, err
    }
    // Dedupe the file names
    dupes := map[string]int{}
    for _, entry := range entries {
        o, ok := entry.(*Object)
        if ok {
            dupes[o.remote]++
        }
    }
    for _, entry := range entries {
        o, ok := entry.(*Object)
        if ok {
            duplicated := dupes[o.remote] > 1
            if duplicated || o.remote == "" {
                o.remote = addFileID(o.remote, o.id)
            }
        }
    }
    return entries, err
}

// listUploads lists a single directory from the uploads
func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    f.uploadedMu.Lock()
    entries, ok := f.uploaded[dir]
    f.uploadedMu.Unlock()
    if !ok && dir != "" {
        return nil, fs.ErrorDirNotFound
    }
    return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
    match, prefix, pattern := patterns.match(f.root, dir, false)
    if pattern == nil || pattern.isFile {
        return nil, fs.ErrorDirNotFound
    }
    if pattern.toEntries != nil {
        return pattern.toEntries(ctx, f, prefix, match)
    }
    return nil, fs.ErrorDirNotFound
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    defer log.Trace(f, "src=%+v", src)("")
    // Temporary Object under construction
    o := &Object{
        fs:     f,
        remote: src.Remote(),
    }
    return o, o.Update(ctx, in, src, options...)
}

// createAlbum creates the album
func (f *Fs) createAlbum(ctx context.Context, albumName string) (album *api.Album, err error) {
    opts := rest.Opts{
        Method:     "POST",
        Path:       "/albums",
        Parameters: url.Values{},
    }
    var request = api.CreateAlbum{
        Album: &api.Album{
            Title: albumName,
        },
    }
    var result api.Album
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(&opts, request, &result)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't create album")
    }
    f.albums[false].add(&result)
    return &result, nil
}

// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
    match, prefix, pattern := patterns.match(f.root, dir, false)
    if pattern == nil {
        return fs.ErrorDirNotFound
    }
    if !pattern.canMkdir {
        return errCantMkdir
    }
    if pattern.isUpload {
        f.uploadedMu.Lock()
        d := fs.NewDir(strings.Trim(prefix, "/"), f.dirTime())
        f.uploaded.AddEntry(d)
        f.uploadedMu.Unlock()
        return nil
    }
    albumName := match[1]
    allAlbums, err := f.listAlbums(false)
    if err != nil {
        return err
    }
    _, ok := allAlbums.get(albumName)
    if ok {
        return nil
    }
    _, err = f.createAlbum(ctx, albumName)
    return err
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
    defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
    match, _, pattern := patterns.match(f.root, dir, false)
    if pattern == nil {
        return fs.ErrorDirNotFound
    }
    if !pattern.canMkdir {
        return errCantRmdir
    }
    if pattern.isUpload {
        f.uploadedMu.Lock()
        err = f.uploaded.Prune(map[string]bool{
            dir: true,
        })
        f.uploadedMu.Unlock()
        return err
    }
    albumName := match[1]
    allAlbums, err := f.listAlbums(false)
    if err != nil {
        return err
    }
    album, ok := allAlbums.get(albumName)
    if !ok {
        return fs.ErrorDirNotFound
    }
    _ = album
    return errAlbumDelete
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
}
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    defer log.Trace(o, "")("")
    if !o.fs.opt.ReadSize || o.bytes >= 0 {
        return o.bytes
    }
    ctx := context.TODO()
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Debugf(o, "Size: Failed to read metadata: %v", err)
        return -1
    }
    var resp *http.Response
    opts := rest.Opts{
        Method:  "HEAD",
        RootURL: o.downloadURL(),
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    if err != nil {
        fs.Debugf(o, "Reading size failed: %v", err)
    } else {
        lengthStr := resp.Header.Get("Content-Length")
        length, err := strconv.ParseInt(lengthStr, 10, 64)
        if err != nil {
            fs.Debugf(o, "Reading size failed to parse Content-Length %q: %v", lengthStr, err)
        } else {
            o.bytes = length
        }
    }
    return o.bytes
}

// setMetaData sets the fs data from an api.MediaItem
func (o *Object) setMetaData(info *api.MediaItem) {
    o.url = info.BaseURL
    o.id = info.ID
    o.bytes = -1 // FIXME
    o.mimeType = info.MimeType
    o.modTime = info.MediaMetadata.CreationTime
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
    if !o.modTime.IsZero() && o.url != "" {
        return nil
    }
    dir, fileName := path.Split(o.remote)
    dir = strings.Trim(dir, "/")
    _, _, pattern := patterns.match(o.fs.root, o.remote, true)
    if pattern == nil {
        return fs.ErrorObjectNotFound
    }
    if !pattern.isFile {
        return fs.ErrorNotAFile
    }
    // If have ID fetch it directly
    if id := findID(fileName); id != "" {
        opts := rest.Opts{
            Method: "GET",
            Path:   "/mediaItems/" + id,
        }
        var item api.MediaItem
        var resp *http.Response
        err = o.fs.pacer.Call(func() (bool, error) {
            resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
            return shouldRetry(resp, err)
        })
        if err != nil {
            return errors.Wrap(err, "couldn't get media item")
        }
        o.setMetaData(&item)
        return nil
    }
    // Otherwise list the directory the file is in
    entries, err := o.fs.List(ctx, dir)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return fs.ErrorObjectNotFound
        }
        return err
    }
    // and find the file in the directory
    for _, entry := range entries {
        if entry.Remote() == o.remote {
            if newO, ok := entry.(*Object); ok {
                *o = *newO
                return nil
            }
        }
    }
    return fs.ErrorObjectNotFound
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
    defer log.Trace(o, "")("")
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
        return time.Now()
    }
    return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
    return fs.ErrorCantSetModTime
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
    return true
}

// downloadURL returns the URL for a full bytes download for the object
func (o *Object) downloadURL() string {
    url := o.url + "=d"
    if strings.HasPrefix(o.mimeType, "video/") {
        url += "v"
    }
    return url
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    defer log.Trace(o, "")("")
    err = o.readMetaData(ctx)
    if err != nil {
        fs.Debugf(o, "Open: Failed to read metadata: %v", err)
        return nil, err
    }
    var resp *http.Response
    opts := rest.Opts{
        Method:  "GET",
        RootURL: o.downloadURL(),
        Options: options,
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, err
    }
    return resp.Body, err
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    defer log.Trace(o, "src=%+v", src)("err=%v", &err)
    match, _, pattern := patterns.match(o.fs.root, o.remote, true)
    if pattern == nil || !pattern.isFile || !pattern.canUpload {
        return errCantUpload
    }
    var (
        albumID  string
        fileName string
    )
    if pattern.isUpload {
        fileName = match[1]
    } else {
        var albumName string
        albumName, fileName = match[1], match[2]

        // Create album if not found
        album, ok := o.fs.albums[false].get(albumName)
        if !ok {
            album, err = o.fs.createAlbum(ctx, albumName)
            if err != nil {
                return err
            }
        }

        // Check we can write to this album
        if !album.IsWriteable {
            return errOwnAlbums
        }

        albumID = album.ID
    }

    // Upload the media item in exchange for an UploadToken
    opts := rest.Opts{
        Method: "POST",
        Path:   "/uploads",
        ExtraHeaders: map[string]string{
            "X-Goog-Upload-File-Name": fileName,
            "X-Goog-Upload-Protocol":  "raw",
        },
        Body: in,
    }
    var token []byte
    var resp *http.Response
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        resp, err = o.fs.srv.Call(&opts)
        if err != nil {
            _ = resp.Body.Close()
            return shouldRetry(resp, err)
        }
        token, err = rest.ReadBody(resp)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return errors.Wrap(err, "couldn't upload file")
    }
    uploadToken := strings.TrimSpace(string(token))
    if uploadToken == "" {
        return errors.New("empty upload token")
    }

    // Create the media item from an UploadToken, optionally adding to an album
    opts = rest.Opts{
        Method: "POST",
        Path:   "/mediaItems:batchCreate",
    }
    var request = api.BatchCreateRequest{
        AlbumID: albumID,
        NewMediaItems: []api.NewMediaItem{
            {
                SimpleMediaItem: api.SimpleMediaItem{
                    UploadToken: uploadToken,
                },
            },
        },
    }
    var result api.BatchCreateResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(&opts, request, &result)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return errors.Wrap(err, "failed to create media item")
    }
    if len(result.NewMediaItemResults) != 1 {
        return errors.New("bad response to BatchCreate wrong number of items")
    }
    mediaItemResult := result.NewMediaItemResults[0]
    if mediaItemResult.Status.Code != 0 {
        return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
    }
    o.setMetaData(&mediaItemResult.MediaItem)

    // Add upload to internal storage
    if pattern.isUpload {
        o.fs.uploaded.AddEntry(o)
    }
    return nil
}
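Update drives a two-step protocol: POST the raw bytes to /v1/uploads to obtain an upload token, then exchange the token for a media item with mediaItems:batchCreate. A standalone sketch of those two calls (illustration only, not the backend's implementation; it uses net/http directly and assumes client is an *http.Client already authorised for the photoslibrary scope):

package sketch

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
)

// uploadSketch is a hypothetical illustration of the wire protocol.
// data holds the image bytes; the filename is made up.
func uploadSketch(client *http.Client, data []byte) error {
    // Step 1: upload the bytes; the response body is an upload token.
    req, err := http.NewRequest("POST",
        "https://photoslibrary.googleapis.com/v1/uploads", bytes.NewReader(data))
    if err != nil {
        return err
    }
    req.Header.Set("X-Goog-Upload-File-Name", "image.jpg")
    req.Header.Set("X-Goog-Upload-Protocol", "raw")
    resp, err := client.Do(req)
    if err != nil {
        return err
    }
    body, err := ioutil.ReadAll(resp.Body)
    _ = resp.Body.Close()
    if err != nil {
        return err
    }
    token := strings.TrimSpace(string(body))

    // Step 2: turn the token into a media item (no albumId given here,
    // so it lands in the main library).
    payload := fmt.Sprintf(
        `{"newMediaItems":[{"simpleMediaItem":{"uploadToken":%q}}]}`, token)
    resp, err = client.Post(
        "https://photoslibrary.googleapis.com/v1/mediaItems:batchCreate",
        "application/json", strings.NewReader(payload))
    if err != nil {
        return err
    }
    defer func() { _ = resp.Body.Close() }()
    fmt.Println("batchCreate status:", resp.Status)
    return nil
}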
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
    match, _, pattern := patterns.match(o.fs.root, o.remote, true)
    if pattern == nil || !pattern.isFile || !pattern.canUpload || pattern.isUpload {
        return errRemove
    }
    albumName, fileName := match[1], match[2]
    album, ok := o.fs.albums[false].get(albumName)
    if !ok {
        return errors.Errorf("couldn't find file %q in album %q for delete", fileName, albumName)
    }
    opts := rest.Opts{
        Method:     "POST",
        Path:       "/albums/" + album.ID + ":batchRemoveMediaItems",
        NoResponse: true,
    }
    var request = api.BatchRemoveItems{
        MediaItemIds: []string{o.id},
    }
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return errors.Wrap(err, "couldn't delete item from album")
    }
    return nil
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    return o.mimeType
}

// ID of an Object if known, "" otherwise
func (o *Object) ID() string {
    return o.id
}

// Check the interfaces are satisfied
var (
    _ fs.Fs        = &Fs{}
    _ fs.Object    = &Object{}
    _ fs.MimeTyper = &Object{}
    _ fs.IDer      = &Object{}
)
backend/googlephotos/googlephotos_test.go (new file, 306 lines)

@@ -0,0 +1,306 @@
package googlephotos

import (
    "context"
    "fmt"
    "io/ioutil"
    "net/http"
    "path"
    "testing"
    "time"

    _ "github.com/ncw/rclone/backend/local"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/fstest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const (
    // We have two different files here as Google Photos will uniq
    // them otherwise which confuses the tests as the filename is
    // unexpected.
    fileNameAlbum  = "rclone-test-image1.jpg"
    fileNameUpload = "rclone-test-image2.jpg"
)

// Wrapper to override the remote for an object
type overrideRemoteObject struct {
    fs.Object
    remote string
}

// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
    return o.remote
}

func TestIntegration(t *testing.T) {
    ctx := context.Background()
    fstest.Initialise()

    // Create Fs
    if *fstest.RemoteName == "" {
        *fstest.RemoteName = "TestGooglePhotos:"
    }
    f, err := fs.NewFs(*fstest.RemoteName)
    if err == fs.ErrorNotFoundInConfigFile {
        t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
    }
    require.NoError(t, err)

    // Create local Fs pointing at testfiles
    localFs, err := fs.NewFs("testfiles")
    require.NoError(t, err)

    t.Run("CreateAlbum", func(t *testing.T) {
        albumName := "album/rclone-test-" + fstest.RandomString(24)
        err = f.Mkdir(ctx, albumName)
        require.NoError(t, err)
        remote := albumName + "/" + fileNameAlbum

        t.Run("PutFile", func(t *testing.T) {
            srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
            require.NoError(t, err)
            in, err := srcObj.Open(ctx)
            require.NoError(t, err)
            dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
            require.NoError(t, err)
            assert.Equal(t, remote, dstObj.Remote())
            _ = in.Close()
            remoteWithID := addFileID(remote, dstObj.(*Object).id)

            t.Run("ObjectFs", func(t *testing.T) {
                assert.Equal(t, f, dstObj.Fs())
            })

            t.Run("ObjectString", func(t *testing.T) {
                assert.Equal(t, remote, dstObj.String())
                assert.Equal(t, "<nil>", (*Object)(nil).String())
            })

            t.Run("ObjectHash", func(t *testing.T) {
                h, err := dstObj.Hash(ctx, hash.MD5)
                assert.Equal(t, "", h)
                assert.Equal(t, hash.ErrUnsupported, err)
            })

            t.Run("ObjectSize", func(t *testing.T) {
                assert.Equal(t, int64(-1), dstObj.Size())
                f.(*Fs).opt.ReadSize = true
                defer func() {
                    f.(*Fs).opt.ReadSize = false
                }()
                size := dstObj.Size()
                assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
            })

            t.Run("ObjectSetModTime", func(t *testing.T) {
                err := dstObj.SetModTime(ctx, time.Now())
                assert.Equal(t, fs.ErrorCantSetModTime, err)
            })

            t.Run("ObjectStorable", func(t *testing.T) {
                assert.True(t, dstObj.Storable())
            })

            t.Run("ObjectOpen", func(t *testing.T) {
                in, err := dstObj.Open(ctx)
                require.NoError(t, err)
                buf, err := ioutil.ReadAll(in)
                require.NoError(t, err)
                require.NoError(t, in.Close())
                assert.True(t, len(buf) > 1000)
                contentType := http.DetectContentType(buf[:512])
                assert.Equal(t, "image/jpeg", contentType)
            })

            t.Run("CheckFileInAlbum", func(t *testing.T) {
                entries, err := f.List(ctx, albumName)
                require.NoError(t, err)
                assert.Equal(t, 1, len(entries))
                assert.Equal(t, remote, entries[0].Remote())
                assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
            })

            // Check it is there in the date/month/year hierarchy
            // 2013-07-13 is the creation date of the folder
            checkPresent := func(t *testing.T, objPath string) {
                entries, err := f.List(ctx, objPath)
                require.NoError(t, err)
                found := false
                for _, entry := range entries {
                    leaf := path.Base(entry.Remote())
                    if leaf == fileNameAlbum || leaf == remoteWithID {
                        found = true
                    }
                }
                assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
            }

            t.Run("CheckInByYear", func(t *testing.T) {
                checkPresent(t, "media/by-year/2013")
            })

            t.Run("CheckInByMonth", func(t *testing.T) {
                checkPresent(t, "media/by-month/2013/2013-07")
            })

            t.Run("CheckInByDay", func(t *testing.T) {
                checkPresent(t, "media/by-day/2013/2013-07-26")
            })

            t.Run("NewObject", func(t *testing.T) {
                o, err := f.NewObject(ctx, remote)
                require.NoError(t, err)
                require.Equal(t, remote, o.Remote())
            })

            t.Run("NewObjectWithID", func(t *testing.T) {
                o, err := f.NewObject(ctx, remoteWithID)
                require.NoError(t, err)
                require.Equal(t, remoteWithID, o.Remote())
            })

            t.Run("NewFsIsFile", func(t *testing.T) {
                fNew, err := fs.NewFs(*fstest.RemoteName + remote)
                assert.Equal(t, fs.ErrorIsFile, err)
                leaf := path.Base(remote)
                o, err := fNew.NewObject(ctx, leaf)
                require.NoError(t, err)
                require.Equal(t, leaf, o.Remote())
            })

            t.Run("RemoveFileFromAlbum", func(t *testing.T) {
                err = dstObj.Remove(ctx)
                require.NoError(t, err)

                time.Sleep(time.Second)

                // Check album empty
                entries, err := f.List(ctx, albumName)
                require.NoError(t, err)
                assert.Equal(t, 0, len(entries))
            })
        })

        // remove the album
        err = f.Rmdir(ctx, albumName)
        require.Error(t, err) // FIXME doesn't work yet
    })

    t.Run("UploadMkdir", func(t *testing.T) {
        assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
        assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))

        t.Run("List", func(t *testing.T) {
            entries, err := f.List(ctx, "upload")
            require.NoError(t, err)
            assert.Equal(t, 1, len(entries))
            assert.Equal(t, "upload/dir", entries[0].Remote())

            entries, err = f.List(ctx, "upload/dir")
            require.NoError(t, err)
            assert.Equal(t, 1, len(entries))
            assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
        })

        t.Run("Rmdir", func(t *testing.T) {
            assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
            assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
        })

        t.Run("ListEmpty", func(t *testing.T) {
            entries, err := f.List(ctx, "upload")
            require.NoError(t, err)
            assert.Equal(t, 0, len(entries))

            _, err = f.List(ctx, "upload/dir")
            assert.Equal(t, fs.ErrorDirNotFound, err)
        })
    })

    t.Run("Upload", func(t *testing.T) {
        uploadDir := "upload/dir/subdir"
        remote := path.Join(uploadDir, fileNameUpload)

        srcObj, err := localFs.NewObject(ctx, fileNameUpload)
        require.NoError(t, err)
        in, err := srcObj.Open(ctx)
        require.NoError(t, err)
        dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
        require.NoError(t, err)
        assert.Equal(t, remote, dstObj.Remote())
        _ = in.Close()
        remoteWithID := addFileID(remote, dstObj.(*Object).id)

        t.Run("List", func(t *testing.T) {
            entries, err := f.List(ctx, uploadDir)
            require.NoError(t, err)
            require.Equal(t, 1, len(entries))
            assert.Equal(t, remote, entries[0].Remote())
            assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
        })

        t.Run("NewObject", func(t *testing.T) {
            o, err := f.NewObject(ctx, remote)
            require.NoError(t, err)
            require.Equal(t, remote, o.Remote())
        })

        t.Run("NewObjectWithID", func(t *testing.T) {
|
||||
o, err := f.NewObject(ctx, remoteWithID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, remoteWithID, o.Remote())
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
t.Run("Name", func(t *testing.T) {
|
||||
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
|
||||
})
|
||||
|
||||
t.Run("Root", func(t *testing.T) {
|
||||
assert.Equal(t, "", f.Root())
|
||||
})
|
||||
|
||||
t.Run("String", func(t *testing.T) {
|
||||
assert.Equal(t, `Google Photos path ""`, f.String())
|
||||
})
|
||||
|
||||
t.Run("Features", func(t *testing.T) {
|
||||
features := f.Features()
|
||||
assert.False(t, features.CaseInsensitive)
|
||||
assert.True(t, features.ReadMimeType)
|
||||
})
|
||||
|
||||
t.Run("Precision", func(t *testing.T) {
|
||||
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
|
||||
})
|
||||
|
||||
t.Run("Hashes", func(t *testing.T) {
|
||||
assert.Equal(t, hash.Set(hash.None), f.Hashes())
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestAddID(t *testing.T) {
|
||||
assert.Equal(t, "potato {123}", addID("potato", "123"))
|
||||
assert.Equal(t, "{123}", addID("", "123"))
|
||||
}
|
||||
|
||||
func TestFileAddID(t *testing.T) {
|
||||
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
|
||||
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
|
||||
assert.Equal(t, "{123}", addFileID("", "123"))
|
||||
}
|
||||
|
||||
func TestFindID(t *testing.T) {
|
||||
assert.Equal(t, "", findID("potato"))
|
||||
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
|
||||
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
|
||||
ID = ID[1:]
|
||||
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
|
||||
}
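
For reference, here is a minimal sketch of helpers consistent with the tests above. The real implementations live elsewhere in the backend; the regular expression and the 55-character minimum ID length are inferred from `TestFindID`, so treat the details as assumptions rather than the canonical code.

```
package googlephotos

import (
	"path"
	"regexp"
)

// idRE matches an ID in the form "{ID}" - the character class and the
// minimum length of 55 are inferred from TestFindID (assumption)
var idRE = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`)

// addID adds the ID to the name, e.g. addID("potato", "123") == "potato {123}"
func addID(name string, ID string) string {
	if name == "" {
		return "{" + ID + "}"
	}
	return name + " {" + ID + "}"
}

// addFileID inserts the ID before the file extension,
// e.g. addFileID("potato.txt", "123") == "potato {123}.txt"
func addFileID(name string, ID string) string {
	ext := path.Ext(name)
	return addID(name[:len(name)-len(ext)], ID) + ext
}

// findID extracts an ID from the name, returning "" if none is found
func findID(name string) string {
	match := idRE.FindStringSubmatch(name)
	if match == nil {
		return ""
	}
	return match[1]
}
```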
335
backend/googlephotos/pattern.go
Normal file
@ -0,0 +1,335 @@
// Store the parsing of file patterns

package googlephotos

import (
	"context"
	"fmt"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/backend/googlephotos/api"
	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
	listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
	listAlbums(shared bool) (all *albums, err error)
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	dirTime() time.Time
}

// dirPattern describes a single directory pattern
type dirPattern struct {
	re        string         // match for the path
	match     *regexp.Regexp // compiled match
	canUpload bool           // true if can upload here
	canMkdir  bool           // true if can make a directory here
	isFile    bool           // true if this is a file
	isUpload  bool           // true if this is the upload directory
	// function to turn a match into DirEntries
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
	{
		re: `^$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"media", f.dirTime()),
				fs.NewDir(prefix+"album", f.dirTime()),
				fs.NewDir(prefix+"shared-album", f.dirTime()),
				fs.NewDir(prefix+"upload", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^upload(?:/(.*))?$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listUploads(ctx, match[0])
		},
		canUpload: true,
		canMkdir:  true,
		isUpload:  true,
	},
	{
		re:        `^upload/(.*)$`,
		isFile:    true,
		canUpload: true,
		isUpload:  true,
	},
	{
		re: `^media$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"all", f.dirTime()),
				fs.NewDir(prefix+"by-year", f.dirTime()),
				fs.NewDir(prefix+"by-month", f.dirTime()),
				fs.NewDir(prefix+"by-day", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^media/all$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listDir(ctx, prefix, api.SearchFilter{})
		},
	},
	{
		re:     `^media/all/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-year$`,
		toEntries: years,
	},
	{
		re: `^media/by-year/(\d{4})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-year/(\d{4})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-month$`,
		toEntries: years,
	},
	{
		re:        `^media/by-month/(\d{4})$`,
		toEntries: months,
	},
	{
		re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-day$`,
		toEntries: years,
	},
	{
		re:        `^media/by-day/(\d{4})$`,
		toEntries: days,
	},
	{
		re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re: `^album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, "")
		},
	},
	{
		re:       `^album/(.+)$`,
		canMkdir: true,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, match[1])
		},
	},
	{
		re:        `^album/(.+?)/([^/]+)$`,
		canUpload: true,
		isFile:    true,
	},
	{
		re: `^shared-album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, "")
		},
	},
	{
		re: `^shared-album/(.+)$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, match[1])
		},
	},
	{
		re:     `^shared-album/(.+?)/([^/]+)$`,
		isFile: true,
	},
}.mustCompile()

// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
	for i := range ds {
		pattern := &ds[i]
		pattern.match = regexp.MustCompile(pattern.re)
	}
	return ds
}

// match finds the path passed in within the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
	itemPath = strings.Trim(itemPath, "/")
	absPath := path.Join(root, itemPath)
	prefix = strings.Trim(absPath[len(root):], "/")
	if prefix != "" {
		prefix += "/"
	}
	for i := range ds {
		pattern = &ds[i]
		if pattern.isFile != isFile {
			continue
		}
		match = pattern.match.FindStringSubmatch(absPath)
		if match != nil {
			return
		}
	}
	return nil, "", nil
}
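
// For illustration (mirroring cases in pattern_test.go below): resolving
// a listing of "media" at the root of the remote picks out the `^media$`
// pattern:
//
//	match, prefix, pattern := patterns.match("", "media", false)
//	// match   == []string{"media"}
//	// prefix  == "media/"
//	// pattern == &patterns[3], whose toEntries lists the
//	// all/by-year/by-month/by-day subdirectories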

// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	currentYear := f.dirTime().Year()
	for year := 2000; year <= currentYear; year++ {
		entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
	}
	return entries, nil
}

// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	for month := 1; month <= 12; month++ {
		entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
	}
	return entries, nil
}

// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	for current.Year() == currentYear {
		entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
		current = current.AddDate(0, 0, 1)
	}
	return entries, nil
}

// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: year,
					},
				},
			},
		},
	}
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}
	return sf, nil
}
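
// For illustration: listing media/by-day/2000/2000-01-02 calls this with
// match[1:] = "2000", "01", "02", and (as checked in pattern_test.go
// below) the resulting filter is equivalent to:
//
//	api.SearchFilter{Filters: &api.Filters{DateFilter: &api.DateFilter{
//		Dates: []api.Date{{Day: 2, Month: 1, Year: 2000}},
//	}}}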

// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
	albums, err := f.listAlbums(shared)
	if err != nil {
		return nil, err
	}
	// Put in the directories
	dirs, foundAlbumPath := albums.getDirs(albumPath)
	if foundAlbumPath {
		for _, dir := range dirs {
			d := fs.NewDir(prefix+dir, f.dirTime())
			dirPath := path.Join(albumPath, dir)
			// if this dir is an album add more special stuff
			album, ok := albums.get(dirPath)
			if ok {
				count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
				if err != nil {
					fs.Debugf(f, "Error reading media count: %v", err)
				}
				d.SetID(album.ID).SetItems(count)
			}
			entries = append(entries, d)
		}
	}
	// if this is an album then return a filter to list it
	album, foundAlbum := albums.get(albumPath)
	if foundAlbum {
		filter := api.SearchFilter{AlbumID: album.ID}
		newEntries, err := f.listDir(ctx, prefix, filter)
		if err != nil {
			return nil, err
		}
		entries = append(entries, newEntries...)
	}
	if !foundAlbumPath && !foundAlbum && albumPath != "" {
		return nil, fs.ErrorDirNotFound
	}
	return entries, nil
}
495
backend/googlephotos/pattern_test.go
Normal file
@ -0,0 +1,495 @@
package googlephotos

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/ncw/rclone/backend/googlephotos/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/dirtree"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/fstest/mockobject"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")

// mock Fs for testing patterns
type testLister struct {
	t        *testing.T
	albums   *albums
	names    []string
	uploaded dirtree.DirTree
}

// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
	return &testLister{
		t:        t,
		albums:   newAlbums(),
		uploaded: dirtree.New(),
	}
}

// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
	for _, name := range f.names {
		entries = append(entries, mockobject.New(prefix+name))
	}
	return entries, nil
}

// mock listAlbums for testing
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
	return f.albums, nil
}

// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entries, _ = f.uploaded[dir]
	return entries, nil
}

// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
	return startTime
}

func TestPatternMatch(t *testing.T) {
	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		isFile   bool
		// expected output
		wantMatch   []string
		wantPrefix  string
		wantPattern *dirPattern
	}{
		{
			root:        "",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{""},
			wantPrefix:  "",
			wantPattern: &patterns[0],
		},
		{
			root:        "",
			itemPath:    "",
			isFile:      true,
			wantMatch:   nil,
			wantPrefix:  "",
			wantPattern: nil,
		},
		{
			root:        "upload",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload", ""},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/dir",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload/dir", "dir"},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/file.jpg",
			itemPath:    "",
			isFile:      true,
			wantMatch:   []string{"upload/file.jpg", "file.jpg"},
			wantPrefix:  "",
			wantPattern: &patterns[2],
		},
		{
			root:        "media",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "",
			wantPattern: &patterns[3],
		},
		{
			root:        "",
			itemPath:    "media",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "media/",
			wantPattern: &patterns[3],
		},
		{
			root:        "media/all",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "",
			wantPattern: &patterns[4],
		},
		{
			root:        "media",
			itemPath:    "all",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "all/",
			wantPattern: &patterns[4],
		},
		{
			root:        "media/all",
			itemPath:    "file.jpg",
			isFile:      true,
			wantMatch:   []string{"media/all/file.jpg", "file.jpg"},
			wantPrefix:  "file.jpg/",
			wantPattern: &patterns[5],
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
			gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
			assert.Equal(t, test.wantMatch, gotMatch)
			assert.Equal(t, test.wantPrefix, gotPrefix)
			assert.Equal(t, test.wantPattern, gotPattern)
		})
	}
}

func TestPatternMatchToEntries(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)
	f.names = []string{"file.jpg"}
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})
	f.albums.add(&api.Album{
		ID:    "2",
		Title: "sub",
	})
	f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
	f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))

	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		// expected output
		wantMatch  []string
		wantPrefix string
		remotes    []string
	}{
		{
			root:       "",
			itemPath:   "",
			wantMatch:  []string{""},
			wantPrefix: "",
			remotes:    []string{"media/", "album/", "shared-album/", "upload/"},
		},
		{
			root:       "upload",
			itemPath:   "",
			wantMatch:  []string{"upload", ""},
			wantPrefix: "",
			remotes:    []string{"upload/file1.jpg", "upload/dir/"},
		},
		{
			root:       "upload",
			itemPath:   "dir",
			wantMatch:  []string{"upload/dir", "dir"},
			wantPrefix: "dir/",
			remotes:    []string{"upload/dir/file2.jpg"},
		},
		{
			root:       "media",
			itemPath:   "",
			wantMatch:  []string{"media"},
			wantPrefix: "",
			remotes:    []string{"all/", "by-year/", "by-month/", "by-day/"},
		},
		{
			root:       "media/all",
			itemPath:   "",
			wantMatch:  []string{"media/all"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media",
			itemPath:   "all",
			wantMatch:  []string{"media/all"},
			wantPrefix: "all/",
			remotes:    []string{"all/file.jpg"},
		},
		{
			root:       "media/by-year",
			itemPath:   "",
			wantMatch:  []string{"media/by-year"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-year/2000",
			itemPath:   "",
			wantMatch:  []string{"media/by-year/2000", "2000"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-month",
			itemPath:   "",
			wantMatch:  []string{"media/by-month"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-month/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
		},
		{
			root:       "media/by-month/2001/2001-01",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001/2001-01", "2001", "01"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-day",
			itemPath:   "",
			wantMatch:  []string{"media/by-day"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-day/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
		},
		{
			root:       "media/by-day/2001/2001-01-02",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "album",
			itemPath:   "",
			wantMatch:  []string{"album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "album/sub",
			itemPath:   "",
			wantMatch:  []string{"album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "shared-album",
			itemPath:   "",
			wantMatch:  []string{"shared-album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "shared-album/sub",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "shared-album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
			match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
			assert.Equal(t, test.wantMatch, match)
			assert.Equal(t, test.wantPrefix, prefix)
			assert.NotNil(t, pattern)
			assert.NotNil(t, pattern.toEntries)

			entries, err := pattern.toEntries(ctx, f, prefix, match)
			assert.NoError(t, err)
			var remotes = []string{}
			for _, entry := range entries {
				remote := entry.Remote()
				if _, isDir := entry.(fs.Directory); isDir {
					remote += "/"
				}
				remotes = append(remotes, remote)
				if len(remotes) >= 4 {
					break // only test first 4 entries
				}
			}
			assert.Equal(t, test.remotes, remotes)
		})
	}
}

func TestPatternYears(t *testing.T) {
	f := newTestLister(t)
	entries, err := years(context.Background(), f, "potato/", nil)
	require.NoError(t, err)

	year := 2000
	for _, entry := range entries {
		assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
		year++
	}
}

func TestPatternMonths(t *testing.T) {
	f := newTestLister(t)
	entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 12, len(entries))
	for i, entry := range entries {
		assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
	}
}

func TestPatternDays(t *testing.T) {
	f := newTestLister(t)
	entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 366, len(entries))
	assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
	assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}

func TestPatternYearMonthDayFilter(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)

	// Years
	sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: 2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
	require.Error(t, err)

	// Months
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
	require.Error(t, err)

	// Days
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Day:   2,
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
	require.Error(t, err)
}

func TestPatternAlbumsToEntries(t *testing.T) {
	f := newTestLister(t)
	ctx := context.Background()

	_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.Equal(t, fs.ErrorDirNotFound, err)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})

	entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 1, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok := entries[0].(fs.Directory)
	assert.Equal(t, true, ok)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub",
	})
	f.names = []string{"file.jpg"}

	entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 2, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok = entries[0].(fs.Directory)
	assert.Equal(t, true, ok)
	assert.Equal(t, "potato/file.jpg", entries[1].Remote())
	_, ok = entries[1].(fs.Object)
	assert.Equal(t, true, ok)
}
BIN
backend/googlephotos/testfiles/rclone-test-image1.jpg
Normal file
Binary file not shown. (Size: 16 KiB)
BIN
backend/googlephotos/testfiles/rclone-test-image2.jpg
Normal file
Binary file not shown. (Size: 16 KiB)
@ -35,6 +35,7 @@ docs = [
    "ftp.md",
    "googlecloudstorage.md",
    "drive.md",
    "googlephotos.md",
    "http.md",
    "hubic.md",
    "jottacloud.md",
@ -26,6 +26,7 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
* {{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
* {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
* {{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}}
* {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}

@ -32,6 +32,7 @@ See the following for detailed instructions for
* [FTP](/ftp/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Google Drive](/drive/)
* [Google Photos](/googlephotos/)
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
361
docs/content/googlephotos.md
Normal file
@ -0,0 +1,361 @@
---
title: "Google Photos"
description: "Rclone docs for Google Photos"
date: "2019-06-25"
---

<i class="fa fa-photo"></i> Google Photos
-------------------------------------------------

The rclone backend for [Google Photos](https://www.google.com/photos/about/) is
a specialized backend for transferring photos and videos to and from
Google Photos.

**NB** The Google Photos API which rclone uses has quite a few
limitations, so please read the [limitations section](#limitations)
carefully to make sure it is suitable for your use.

## Configuring Google Photos

The initial setup for Google Photos involves getting a token from
Google Photos which you need to do in your browser. `rclone config`
walks you through it.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[snip]
13 / Google Photos
   \ "google photos"
[snip]
Storage> google photos
** See help for google photos backend at: https://rclone.org/googlephotos/ **

Google Application Client Id
Leave blank normally.
Enter a string value. Press Enter for the default ("").
client_id>
Google Application Client Secret
Leave blank normally.
Enter a string value. Press Enter for the default ("").
client_secret>
Set to make the Google Photos backend read only.

If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.
Enter a boolean value (true or false). Press Enter for the default ("false").
read_only>
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
Use auto config?
 * Say Y if not sure
 * Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code

*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.

--------------------
[remote]
type = google photos
token = {"access_token":"ya29.Gls1B-tkDOExu0hSWK0UdQ-6fgixD1Rvs3jlzqWgAKvQD5w995leAeiY96_fIccnOeW1jn8KboLTNUSp1kkXGPohBZAHjN1nygUiNGimZlwgs91JDmtf5JHNC2RV","token_type":"Bearer","refresh_token":"1/wTok2SVTqtmu6BcrUI7LUOu7MnvUN52m-rGFfanE50c","expiry":"2019-06-28T17:38:04.644930156+01:00"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and this
may require you to unblock it temporarily if you are running a host
firewall, or use manual mode.

This remote is called `remote` and can now be used like this

See all the albums in your photos

    rclone lsd remote:album

Make a new album

    rclone mkdir remote:album/newAlbum

List the contents of an album

    rclone ls remote:album/newAlbum

Sync `/home/local/images` to Google Photos, removing any excess
files in the album.

    rclone sync /home/local/images remote:album/newAlbum

## Layout

As Google Photos is not a general purpose cloud storage system, the
backend is laid out to help you navigate it.

The directories under `media` show different ways of categorizing the
media. Each file will appear multiple times. So if you want to make
a backup of your google photos you might choose to backup
`remote:media/by-month`. (**NB** `remote:media/by-day` is rather slow
at the moment so avoid it for syncing.)

Note that all your photos and videos will appear somewhere under
`media`, but they may not appear under `album` unless you've put them
into albums.

```
/
- upload
    - file1.jpg
    - file2.jpg
    - ...
- media
    - all
        - file1.jpg
        - file2.jpg
        - ...
    - by-year
        - 2000
            - file1.jpg
            - ...
        - 2001
            - file2.jpg
            - ...
        - ...
    - by-month
        - 2000
            - 2000-01
                - file1.jpg
                - ...
            - 2000-02
                - file2.jpg
                - ...
        - ...
    - by-day
        - 2000
            - 2000-01-01
                - file1.jpg
                - ...
            - 2000-01-02
                - file2.jpg
                - ...
        - ...
- album
    - album name
    - album name/sub
- shared-album
    - album name
    - album name/sub
```

There are two writable parts of the tree: the `upload` directory and
sub directories of the `album` directory.

The `upload` directory is for uploading files you don't want to put
into albums. This will be empty to start with and will contain the
files you've uploaded for one rclone session only, becoming empty
again when you restart rclone. The use case for this would be if you
have a load of files you just want to dump into Google Photos in one
go. For repeated syncing, uploading to `album` will work better.
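
For example, a one-off dump of a local directory (the local path here
is illustrative) would be

    rclone copy /path/to/photos remote:upload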

Directories within the `album` directory are also writable and you
may create new directories (albums) under `album`. If you copy files
with a directory hierarchy in there then rclone will create albums
with the `/` character in them. For example if you do

    rclone copy /path/to/images remote:album/images

and the images directory contains

```
images
    - file1.jpg
    dir
        file2.jpg
    dir2
        dir3
            file3.jpg
```

then rclone will create the following albums with the following files in them

- images
    - file1.jpg
- images/dir
    - file2.jpg
- images/dir2/dir3
    - file3.jpg

This means that you can use the `album` path pretty much like a normal
filesystem and it is a good target for repeated syncing.
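
For example, a repeated backup of the `images` directory above could be run as

    rclone sync /path/to/images remote:album/images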

The `shared-album` directory shows albums shared with you or by you.
This is similar to the Sharing tab in the Google Photos web interface.

## Limitations

Only images and videos can be uploaded. If you attempt to upload
something that is not a video or image, or a format that Google Photos
doesn't understand, rclone will upload the file, then Google Photos
will give an error when it is turned into a media item.

Note that all media items uploaded to Google Photos through the API
are stored in full resolution at "original quality" and **will** count
towards your storage quota in your Google Account. The API does
**not** offer a way to upload in "high quality" mode.

### Downloading Images

When Images are downloaded this strips EXIF location (according to the
docs and my tests). This is a limitation of the Google Photos API and
is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115).

### Downloading Videos

When videos are downloaded they are downloaded in a heavily compressed
version compared to downloading them via the Google Photos
web interface. This is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044).

### Duplicates

If a file name is duplicated in a directory then rclone will add the
file ID into its name. So two files called `file.jpg` would then
appear as `file {123456}.jpg` and `file {ABCDEF}.jpg` (the actual IDs
are a lot longer alas!).

If you upload the same image (with the same binary data) twice then
Google Photos will deduplicate it. However it will retain the
filename from the first upload, which may confuse rclone. For example,
if you uploaded an image to `upload` then uploaded the same image to
`album/my_album`, the filename of the image in `album/my_album` will be
what it was uploaded with initially, not what you uploaded it with to
`album`. In practice this shouldn't cause too many problems.

### Modified time

The date shown of media in Google Photos is the creation date as
determined by the EXIF information, or the upload date if that is not
known.

This is not changeable by rclone and is not the modification date of
the media on local disk. This means that rclone cannot use the dates
from Google Photos for syncing purposes.

### Size

The Google Photos API does not return the size of media. This means
that when syncing to Google Photos, rclone can only do a file
existence check.

It is possible to read the size of the media, but this needs an extra
HTTP HEAD request per media item so is very slow and uses up a lot of
transactions. This can be enabled with the `--gphotos-read-size`
option or the `read_size = true` config parameter.

If you want to use the backend with `rclone mount` you will need to
enable this flag, otherwise you will not be able to read media off the
mount.
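
For example, a mount with sizes enabled might look like this (the
mount point is illustrative):

    rclone mount --gphotos-read-size remote:media/by-month /path/to/mountpoint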

### Albums

Rclone can only upload files to albums it created. This is a
[limitation of the Google Photos API](https://developers.google.com/photos/library/guides/manage-albums).

Rclone can only remove files that it uploaded from albums that it created.

### Deleting files

Rclone can remove files from albums it created, but note that the
Google Photos API does not allow media to be deleted permanently so
this media will still remain. See [bug #109759781](https://issuetracker.google.com/issues/109759781).

Rclone cannot delete files anywhere except under `album`.

### Deleting albums

The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733).

<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/googlephotos/googlephotos.go then run make backenddocs -->
### Standard Options

Here are the standard options specific to google photos (Google Photos).

#### --gphotos-client-id

Google Application Client Id
Leave blank normally.

- Config: client_id
- Env Var: RCLONE_GPHOTOS_CLIENT_ID
- Type: string
- Default: ""

#### --gphotos-client-secret

Google Application Client Secret
Leave blank normally.

- Config: client_secret
- Env Var: RCLONE_GPHOTOS_CLIENT_SECRET
- Type: string
- Default: ""

#### --gphotos-read-only

Set to make the Google Photos backend read only.

If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.

- Config: read_only
- Env Var: RCLONE_GPHOTOS_READ_ONLY
- Type: bool
- Default: false

### Advanced Options

Here are the advanced options specific to google photos (Google Photos).

#### --gphotos-read-size

Set to read the size of media items.

Normally rclone does not read the size of media items since this takes
another transaction. This isn't necessary for syncing. However
rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.

- Config: read_size
- Env Var: RCLONE_GPHOTOS_READ_SIZE
- Type: bool
- Default: false

<!--- autogenerated options stop -->
@ -26,6 +26,7 @@ Here is an overview of the major features of each cloud storage system.
| FTP | - | No | No | No | - |
| Google Cloud Storage | MD5 | Yes | No | No | R/W |
| Google Drive | MD5 | Yes | No | Yes | R/W |
| Google Photos | - | No | No | Yes | R |
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Jottacloud | MD5 | Yes | Yes | No | R/W |

@ -141,6 +142,7 @@ operations more efficient.
| FTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
| Google Photos | No | No | No | No | No | No | No | No | No |
| HTTP | No | No | No | No | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| Jottacloud | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes |
@ -67,6 +67,7 @@
    <li><a href="/ftp/"><i class="fa fa-file"></i> FTP</a></li>
    <li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
    <li><a href="/drive/"><i class="fa fa-google"></i> Google Drive</a></li>
    <li><a href="/googlephotos/"><i class="fa fa-photo"></i> Google Photos</a></li>
    <li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
    <li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
    <li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
@ -44,6 +44,10 @@ backends:
    remote: "TestGoogleCloudStorage:"
    subdir: true
    fastlist: true
  - backend: "googlephotos"
    remote: "TestGooglePhotos:"
    tests:
      - backend
  - backend: "hubic"
    remote: "TestHubic:"
    subdir: false