// dedupe - gets rid of identical files on remotes which can have duplicate file names (drive, mega)

package operations

import (
	"context"
	"fmt"
	"log"
	"path"
	"sort"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
)

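// The helpers in this file back the "rclone dedupe" command. As a usage
// sketch (the remote "drive:dups" below is purely illustrative):
//
//	rclone dedupe --dedupe-mode newest drive:dups
//	rclone dedupe --by-hash drive:dups
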
// dedupeRename renames the objs slice to different names
func dedupeRename(ctx context.Context, f fs.Fs, remote string, objs []fs.Object) {
	doMove := f.Features().Move
	if doMove == nil {
		log.Fatalf("Fs %v doesn't support Move", f)
	}
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]

outer:
	for i, o := range objs {
		suffix := 1
		newName := fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
		_, err := f.NewObject(ctx, newName)
		for ; err != fs.ErrorObjectNotFound; suffix++ {
			if err != nil {
				err = fs.CountError(err)
				fs.Errorf(o, "Failed to check for existing object: %v", err)
				continue outer
			}
			if suffix > 100 {
				fs.Errorf(o, "Could not find an available new name")
				continue outer
			}
			newName = fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
			_, err = f.NewObject(ctx, newName)
		}
		if !SkipDestructive(ctx, o, "rename") {
			newObj, err := doMove(ctx, o, newName)
			if err != nil {
				err = fs.CountError(err)
				fs.Errorf(o, "Failed to rename: %v", err)
				continue
			}
			fs.Infof(newObj, "renamed from: %v", o)
		}
	}
}

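// To illustrate the renaming scheme above with hypothetical names: given
// duplicates of "pics/file.jpg", dedupeRename tries "pics/file-1.jpg",
// "pics/file-2.jpg", ... for each copy, bumping the suffix until NewObject
// reports fs.ErrorObjectNotFound, and gives up after 100 attempts per object.
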
// dedupeDeleteAllButOne deletes all the objects in objs except for the one at index keep
func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []fs.Object) {
	count := 0
	for i, o := range objs {
		if i == keep {
			continue
		}
		err := DeleteFile(ctx, o)
		if err == nil {
			count++
		}
	}
	if count > 0 {
		fs.Logf(remote, "Deleted %d extra copies", count)
	}
}

// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {
	ci := fs.GetConfig(ctx)

	// Make map of IDs
	IDs := make(map[string]int, len(objs))
	for _, o := range objs {
		if do, ok := o.(fs.IDer); ok {
			if ID := do.ID(); ID != "" {
				IDs[ID]++
			}
		}
	}

	// Remove duplicate IDs
	newObjs := objs[:0]
	for _, o := range objs {
		if do, ok := o.(fs.IDer); ok {
			if ID := do.ID(); ID != "" {
				if IDs[ID] <= 1 {
					newObjs = append(newObjs, o)
				} else {
					fs.Logf(o, "Ignoring as it appears %d times in the listing and deleting would lead to data loss", IDs[ID])
				}
			}
		}
	}
	objs = newObjs

	// See how many of these duplicates are identical
	dupesByID := make(map[string][]fs.Object, len(objs))
	for _, o := range objs {
		ID := ""
		if ci.SizeOnly && o.Size() >= 0 {
			ID = fmt.Sprintf("size %d", o.Size())
		} else if ht != hash.None {
			hashValue, err := o.Hash(ctx, ht)
			if err == nil && hashValue != "" {
				ID = fmt.Sprintf("%v %s", ht, hashValue)
			}
		}
		if ID == "" {
			remainingObjs = append(remainingObjs, o)
		} else {
			dupesByID[ID] = append(dupesByID[ID], o)
		}
	}

	// Delete identical duplicates, filling remainingObjs with the ones remaining
	for ID, dupes := range dupesByID {
		remainingObjs = append(remainingObjs, dupes[0])
		if len(dupes) > 1 {
			fs.Logf(remote, "Deleting %d/%d identical duplicates (%s)", len(dupes)-1, len(dupes), ID)
			for _, o := range dupes[1:] {
				err := DeleteFile(ctx, o)
				if err != nil {
					remainingObjs = append(remainingObjs, o)
				}
			}
		}
	}

	return remainingObjs
}

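// For example (values are illustrative only): with --size-only set, two
// 1234-byte copies group under the key "size 1234"; otherwise, on a remote
// with MD5 support, they group under a key like "md5 <digest>". Objects for
// which no key can be computed are passed straight through in remainingObjs.
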
// dedupeList lists the duplicates and does nothing
func dedupeList(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) {
	fmt.Printf("%s: %d duplicates\n", remote, len(objs))
	for i, o := range objs {
		hashValue := ""
		if ht != hash.None {
			var err error
			hashValue, err = o.Hash(ctx, ht)
			if err != nil {
				hashValue = err.Error()
			}
		}
		if byHash {
			fmt.Printf("  %d: %12d bytes, %s, %s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
		} else {
			fmt.Printf("  %d: %12d bytes, %s, %v %32s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), ht, hashValue)
		}
	}
}

// dedupeInteractive interactively dedupes the slice of objects,
// returning false if the user chose to quit.
func dedupeInteractive(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) bool {
	dedupeList(ctx, f, ht, remote, objs, byHash)
	commands := []string{"sSkip and do nothing", "kKeep just one (choose which in next step)"}
	if !byHash {
		commands = append(commands, "rRename all to be different (by changing file.jpg to file-1.jpg)")
	}
	commands = append(commands, "qQuit")
	switch config.Command(commands) {
	case 's':
	case 'k':
		keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs))
		dedupeDeleteAllButOne(ctx, keep-1, remote, objs)
	case 'r':
		dedupeRename(ctx, f, remote, objs)
	case 'q':
		return false
	}
	return true
}

// DeduplicateMode is how the dedupe command chooses what to do
type DeduplicateMode int

// Deduplicate modes
const (
	DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
	DeduplicateSkip                               // skip all conflicts
	DeduplicateFirst                              // choose the first object
	DeduplicateNewest                             // choose the newest object
	DeduplicateOldest                             // choose the oldest object
	DeduplicateRename                             // rename the objects
	DeduplicateLargest                            // choose the largest object
	DeduplicateSmallest                           // choose the smallest object
	DeduplicateList                               // list duplicates only
)

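// Note that DeduplicateInteractive is the zero value, so an uninitialised
// DeduplicateMode defaults to interactive mode.
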
// String turns a DeduplicateMode into a string
func (x DeduplicateMode) String() string {
	switch x {
	case DeduplicateInteractive:
		return "interactive"
	case DeduplicateSkip:
		return "skip"
	case DeduplicateFirst:
		return "first"
	case DeduplicateNewest:
		return "newest"
	case DeduplicateOldest:
		return "oldest"
	case DeduplicateRename:
		return "rename"
	case DeduplicateLargest:
		return "largest"
	case DeduplicateSmallest:
		return "smallest"
	case DeduplicateList:
		return "list"
	}
	return "unknown"
}

// Set a DeduplicateMode from a string
func (x *DeduplicateMode) Set(s string) error {
	switch strings.ToLower(s) {
	case "interactive":
		*x = DeduplicateInteractive
	case "skip":
		*x = DeduplicateSkip
	case "first":
		*x = DeduplicateFirst
	case "newest":
		*x = DeduplicateNewest
	case "oldest":
		*x = DeduplicateOldest
	case "rename":
		*x = DeduplicateRename
	case "largest":
		*x = DeduplicateLargest
	case "smallest":
		*x = DeduplicateSmallest
	case "list":
		*x = DeduplicateList
	default:
		return fmt.Errorf("unknown mode for dedupe %q", s)
	}
	return nil
}

// Type of the value
func (x *DeduplicateMode) Type() string {
	return "string"
}

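// String, Set and Type above make *DeduplicateMode satisfy the pflag.Value
// interface, so it can be bound directly to a command line flag. A minimal
// sketch, assuming a caller with an existing *pflag.FlagSet (the flag name
// here is illustrative):
//
//	mode := DeduplicateInteractive
//	flagSet.VarP(&mode, "dedupe-mode", "", "Dedupe mode to use")
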
// Directory with entry count and links to parents
type dedupeDir struct {
	dir    fs.Directory
	parent string
	count  int
}

// Map of directories by ID with recursive counts
type dedupeDirsMap map[string]*dedupeDir

// get returns the dedupeDir for id, creating it if necessary
func (dm dedupeDirsMap) get(id string) *dedupeDir {
	d := dm[id]
	if d == nil {
		d = &dedupeDir{}
		dm[id] = d
	}
	return d
}

// increment adds one to the count of parent and all of its ancestors
func (dm dedupeDirsMap) increment(parent string) {
	if parent != "" {
		d := dm.get(parent)
		d.count++
		dm.increment(d.parent)
	}
}

// dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*dedupeDir, err error) {
	dirsByID := dedupeDirsMap{}
	dirs := map[string][]*dedupeDir{}

	ci := fs.GetConfig(ctx)
	err = walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error {
		for _, entry := range entries {
			tr := accounting.Stats(ctx).NewCheckingTransfer(entry)

			remote := entry.Remote()
			parentRemote := path.Dir(remote)
			if parentRemote == "." {
				parentRemote = ""
			}

			// Obtain the ID of the entry's parent, if known.
			// (This usually means that the backend allows duplicate paths.)
			// Fall back to the remote parent path, if unavailable.
			var parent string
			if entryParentIDer, ok := entry.(fs.ParentIDer); ok {
				parent = entryParentIDer.ParentID()
			}
			if parent == "" {
				parent = parentRemote
			}

			var ID string
			if entryIDer, ok := entry.(fs.IDer); ok {
				ID = entryIDer.ID()
			}
			if ID == "" {
				ID = remote
			}

			if fsDir, ok := entry.(fs.Directory); ok {
				d := dirsByID.get(ID)
				d.dir = fsDir
				d.parent = parent
				dirs[remote] = append(dirs[remote], d)
			}

			dirsByID.increment(parent)
			tr.Done(ctx, nil)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("find duplicate dirs: %w", err)
	}

	// Make sure parents are before children
	duplicateNames := []string{}
	for name, ds := range dirs {
		if len(ds) > 1 {
			duplicateNames = append(duplicateNames, name)
		}
	}
	sort.Strings(duplicateNames)
	for _, name := range duplicateNames {
		duplicateDirs = append(duplicateDirs, dirs[name])
	}

	return
}

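// The result above groups directories by remote path: each element of
// duplicateDirs holds every dedupeDir seen under one name, and the groups
// are sorted by name so that a parent directory always comes before its
// children.
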
// dedupeMergeDuplicateDirs merges all the duplicate directories found
func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]*dedupeDir) error {
	mergeDirs := f.Features().MergeDirs
	if mergeDirs == nil {
		return fmt.Errorf("%v: can't merge directories", f)
	}
	dirCacheFlush := f.Features().DirCacheFlush
	if dirCacheFlush == nil {
		return fmt.Errorf("%v: can't flush dir cache", f)
	}
	for _, dedupeDirs := range duplicateDirs {
		if SkipDestructive(ctx, dedupeDirs[0].dir, "merge duplicate directories") {
			continue
		}

		// Put largest directory in front to minimize movements
		fsDirs := []fs.Directory{}
		largestCount := -1
		largestIdx := 0
		for i, d := range dedupeDirs {
			fsDirs = append(fsDirs, d.dir)
			if d.count > largestCount {
				largestIdx = i
				largestCount = d.count
			}
		}
		fsDirs[largestIdx], fsDirs[0] = fsDirs[0], fsDirs[largestIdx]

		fs.Infof(fsDirs[0], "Merging contents of duplicate directories")
		err := mergeDirs(ctx, fsDirs)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(nil, "merge duplicate dirs: %v", err)
		}
	}
	dirCacheFlush()
	return nil
}

// sortOldestFirst sorts objs so that the oldest object is first
func sortOldestFirst(objs []fs.Object) {
	sort.Slice(objs, func(i, j int) bool {
		return objs[i].ModTime(context.TODO()).Before(objs[j].ModTime(context.TODO()))
	})
}

// sortSmallestFirst sorts objs so that the smallest object is first
func sortSmallestFirst(objs []fs.Object) {
	sort.Slice(objs, func(i, j int) bool {
		return objs[i].Size() < objs[j].Size()
	})
}

// Deduplicate interactively finds duplicate files and offers to
// delete all but one or rename them to be different. Only useful
// with backends which can have duplicate file names, such as
// Google Drive or Mega.
func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool) error {
	ci := fs.GetConfig(ctx)
	// find a hash to use
	ht := f.Hashes().GetOne()
	what := "names"
	if byHash {
		if ht == hash.None {
			return fmt.Errorf("%v has no hashes", f)
		}
		what = ht.String() + " hashes"
	}
	fs.Infof(f, "Looking for duplicate %s using %v mode.", what, mode)

	// Find duplicate directories first and fix them
	if !byHash {
		duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
		if err != nil {
			return err
		}
		if len(duplicateDirs) > 0 {
			if mode != DeduplicateList {
				err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
				if err != nil {
					return err
				}
			} else {
				for _, dedupeDirs := range duplicateDirs {
					remote := dedupeDirs[0].dir.Remote()
					fmt.Printf("%s: %d duplicates of this directory\n", remote, len(dedupeDirs))
				}
			}
		}
	}

	// Now find duplicate files
	files := map[string][]fs.Object{}
	err := walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
		entries.ForObject(func(o fs.Object) {
			tr := accounting.Stats(ctx).NewCheckingTransfer(o)
			defer tr.Done(ctx, nil)

			var remote string
			var err error
			if byHash {
				remote, err = o.Hash(ctx, ht)
				if err != nil {
					fs.Errorf(o, "Failed to hash: %v", err)
					remote = ""
				}
			} else {
				remote = o.Remote()
			}
			if remote != "" {
				files[remote] = append(files[remote], o)
			}
		})
		return nil
	})
	if err != nil {
		return err
	}

	for remote, objs := range files {
		if len(objs) <= 1 {
			continue
		}
		fs.Logf(remote, "Found %d files with duplicate %s", len(objs), what)
		if !byHash && mode != DeduplicateList {
			objs = dedupeDeleteIdentical(ctx, ht, remote, objs)
			if len(objs) <= 1 {
				fs.Logf(remote, "All duplicates removed")
				continue
			}
		}
		switch mode {
		case DeduplicateInteractive:
			if !dedupeInteractive(ctx, f, ht, remote, objs, byHash) {
				return nil
			}
		case DeduplicateFirst:
			dedupeDeleteAllButOne(ctx, 0, remote, objs)
		case DeduplicateNewest:
			sortOldestFirst(objs)
			dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs)
		case DeduplicateOldest:
			sortOldestFirst(objs)
			dedupeDeleteAllButOne(ctx, 0, remote, objs)
		case DeduplicateRename:
			dedupeRename(ctx, f, remote, objs)
		case DeduplicateLargest:
			sortSmallestFirst(objs)
			dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs)
		case DeduplicateSmallest:
			sortSmallestFirst(objs)
			dedupeDeleteAllButOne(ctx, 0, remote, objs)
		case DeduplicateSkip:
			fs.Logf(remote, "Skipping %d files with duplicate %s", len(objs), what)
		case DeduplicateList:
			dedupeList(ctx, f, ht, remote, objs, byHash)
		default:
			// skip
		}
	}
	return nil
}

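// A minimal sketch of driving Deduplicate from library code rather than the
// command line; this is not part of rclone itself and assumes the caller
// builds the Fs with fs/cache and wants to keep the newest copy of each
// duplicate non-interactively:
//
//	f, err := cache.Get(ctx, "remote:path")
//	if err != nil {
//		return err
//	}
//	return operations.Deduplicate(ctx, f, operations.DeduplicateNewest, false)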