// Package bisync implements bisync
// Copyright (c) 2017-2020 Chris Nelson
// Contributions to original python version: Hildo G. Jr., e2t, kalemas, silenceleaf
package bisync

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	gosync "sync"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/terminal"
)
// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
var ErrBisyncAborted = errors.New("bisync aborted")

// bisyncRun keeps bisync runtime state
type bisyncRun struct {
	fs1         fs.Fs
	fs2         fs.Fs
	abort       bool
	critical    bool
	retryable   bool
	basePath    string
	workDir     string
	listing1    string
	listing2    string
	newListing1 string
	newListing2 string
	aliases     bilib.AliasMap
	opt         *Options
	octx        context.Context
	fctx        context.Context
}
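
// queues holds the sets of files accumulated for each sync direction;
// applyDeltas fills them in and modifyListing uses them to update the
// stored listings.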
type queues struct {
	copy1to2      bilib.Names
	copy2to1      bilib.Names
	renamed1      bilib.Names // renamed on 1 and copied to 2
	renamed2      bilib.Names // renamed on 2 and copied to 1
	renameSkipped bilib.Names // not renamed because it was equal
	skippedDirs1  *fileList
	skippedDirs2  *fileList
	deletedonboth bilib.Names
}
// Bisync handles lock file, performs bisync run and checks exit status
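//
// Example (a minimal sketch, not from the original source; fs1 and fs2 are
// assumed to be constructed by the caller, e.g. with fs.NewFs):
//
//	opt := &Options{Resync: true}
//	if err := Bisync(ctx, fs1, fs2, opt); err != nil {
//		fs.Errorf(nil, "bisync failed: %v", err)
//	}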
func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
	opt := *optArg // ensure that input is never changed
	b := &bisyncRun{
		fs1: fs1,
		fs2: fs2,
		opt: &opt,
	}

	if opt.CheckFilename == "" {
		opt.CheckFilename = DefaultCheckFilename
	}
	if opt.Workdir == "" {
		opt.Workdir = DefaultWorkdir
	}
	ci := fs.GetConfig(ctx)
	opt.OrigBackupDir = ci.BackupDir

	if !opt.DryRun && !opt.Force {
		if fs1.Precision() == fs.ModTimeNotSupported {
			return errors.New("modification time support is missing on path1")
		}
		if fs2.Precision() == fs.ModTimeNotSupported {
			return errors.New("modification time support is missing on path2")
		}
	}

	if b.workDir, err = filepath.Abs(opt.Workdir); err != nil {
		return fmt.Errorf("failed to make workdir absolute: %w", err)
	}
	if err = os.MkdirAll(b.workDir, os.ModePerm); err != nil {
		return fmt.Errorf("failed to create workdir: %w", err)
	}
	// Produce a unique name for the sync operation.
	// The session name is always normalized to the non-canonical form (no
	// {hexstring} suffix), regardless of backend-specific flags, so that the
	// same pair of paths always uses the same .lst filenames (#7423). To
	// avoid a breaking change, a suffixed listing left over from an older
	// version is renamed to the non-suffixed name (overwriting any
	// non-suffixed version) before carrying on.
	b.basePath = bilib.BasePath(ctx, b.workDir, b.fs1, b.fs2)
	b.listing1 = b.basePath + ".path1.lst"
	b.listing2 = b.basePath + ".path2.lst"
	b.newListing1 = b.listing1 + "-new"
	b.newListing2 = b.listing2 + "-new"
	b.aliases = bilib.AliasMap{}
	// Handle lock file
	lockFile := ""
	if !opt.DryRun {
		lockFile = b.basePath + ".lck"
		if bilib.FileExists(lockFile) {
			errTip := Color(terminal.MagentaFg, "Tip: this indicates that another bisync run (of these same paths) either is still running or was interrupted before completion. \n")
			errTip += Color(terminal.MagentaFg, "If you're SURE you want to override this safety feature, you can delete the lock file with the following command, then run bisync again: \n")
			errTip += fmt.Sprintf(Color(terminal.HiRedFg, "rclone deletefile \"%s\""), lockFile)
			return fmt.Errorf(Color(terminal.RedFg, "prior lock file found: %s \n")+errTip, Color(terminal.HiYellowFg, lockFile))
		}
		pidStr := []byte(strconv.Itoa(os.Getpid()))
		if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
			return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
		}
		fs.Debugf(nil, "Lock file created: %s", lockFile)
	}
	// Handle SIGINT
	var finaliseOnce gosync.Once

	// markFailed renames a listing to its "-err" variant so that a later run
	// will not trust a listing left behind by an interrupted session.
	markFailed := func(file string) {
		failFile := file + "-err"
		if bilib.FileExists(file) {
			_ = os.Remove(failFile)
			_ = os.Rename(file, failFile)
		}
	}

	finalise := func() {
		finaliseOnce.Do(func() {
			if atexit.Signalled() {
				fs.Logf(nil, "Bisync interrupted. Must run --resync to recover.")
				markFailed(b.listing1)
				markFailed(b.listing2)
				_ = os.Remove(lockFile)
			}
		})
	}
	fnHandle := atexit.Register(finalise)
	defer atexit.Unregister(fnHandle)
	// run bisync
	err = b.runLocked(ctx)

	if lockFile != "" {
		errUnlock := os.Remove(lockFile)
		if errUnlock == nil {
			fs.Debugf(nil, "Lock file removed: %s", lockFile)
		} else if err == nil {
			err = errUnlock
		} else {
			fs.Errorf(nil, "cannot remove lockfile %s: %v", lockFile, errUnlock)
		}
	}

	if b.critical {
		if b.retryable && b.opt.Resilient {
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
			fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
		} else {
			if bilib.FileExists(b.listing1) {
				_ = os.Rename(b.listing1, b.listing1+"-err")
			}
			if bilib.FileExists(b.listing2) {
				_ = os.Rename(b.listing2, b.listing2+"-err")
			}
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
		}
		return ErrBisyncAborted
	}
	if b.abort {
		fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
	}
	if err == nil {
		fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful"))
	}
	return err
}

// runLocked performs a full bisync run
func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	opt := b.opt
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)

	if opt.CheckSync == CheckSyncOnly {
		fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
		if err = b.checkSync(b.listing1, b.listing2); err != nil {
			b.critical = true
			b.retryable = true
		}
		return err
	}

	fs.Infof(nil, "Synching Path1 %s with Path2 %s", quotePath(path1), quotePath(path2))

	if opt.DryRun {
		// In --dry-run mode, preserve original listings and save updates to the .lst-dry files
		origListing1 := b.listing1
		origListing2 := b.listing2
		b.listing1 += "-dry"
		b.listing2 += "-dry"
		b.newListing1 = b.listing1 + "-new"
		b.newListing2 = b.listing2 + "-new"
		if err := bilib.CopyFileIfExists(origListing1, b.listing1); err != nil {
			return err
		}
		if err := bilib.CopyFileIfExists(origListing2, b.listing2); err != nil {
			return err
		}
	}
	// Create second context with filters
	var fctx context.Context
	if fctx, err = b.opt.applyFilters(octx); err != nil {
		b.critical = true
		b.retryable = true
		return
	}
	b.octx = octx
	b.fctx = fctx

	// Generate Path1 and Path2 listings and copy any unique Path2 files to Path1
	if opt.Resync {
		return b.resync(octx, fctx)
	}
	// Check for existence of prior Path1 and Path2 listings
	if !bilib.FileExists(b.listing1) || !bilib.FileExists(b.listing2) {
		// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
		b.critical = true
		b.retryable = true
		errTip := Color(terminal.MagentaFg, "Tip: here are the filenames we were looking for. Do they exist? \n")
		errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
		errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s\n"), Color(terminal.HiBlueFg, b.listing2))
		errTip += Color(terminal.MagentaFg, "Try running this command to inspect the work dir: \n")
		errTip += fmt.Sprintf(Color(terminal.HiCyanFg, "rclone lsl \"%s\""), b.workDir)
		return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run \n" + errTip)
	}

	fs.Infof(nil, "Building Path1 and Path2 listings")
	ls1, ls2, err := b.makeMarchListing(fctx)
	if err != nil {
		return err
	}
2021-05-17 00:39:33 +08:00
// Check for Path1 deltas relative to the prior sync
fs . Infof ( nil , "Path1 checking for diffs" )
2023-10-07 18:33:43 +08:00
ds1 , err := b . findDeltas ( fctx , b . fs1 , b . listing1 , ls1 , "Path1" )
2021-05-17 00:39:33 +08:00
if err != nil {
return err
}
ds1 . printStats ( )
// Check for Path2 deltas relative to the prior sync
fs . Infof ( nil , "Path2 checking for diffs" )
2023-10-07 18:33:43 +08:00
ds2 , err := b . findDeltas ( fctx , b . fs2 , b . listing2 , ls2 , "Path2" )
2021-05-17 00:39:33 +08:00
if err != nil {
return err
}
ds2 . printStats ( )
	// Check access health on the Path1 and Path2 filesystems
	if opt.CheckAccess {
		fs.Infof(nil, "Checking access health")
		err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
		if err != nil {
			b.critical = true
			b.retryable = true
			return
		}
	}

	// Check for too many deleted files - possible error condition.
	// Don't want to start deleting on the other side!
	if !opt.Force {
		if ds1.excessDeletes() || ds2.excessDeletes() {
			b.abort = true
			return errors.New("too many deletes")
		}
	}

	// Check for all files changed such as all dates changed due to DST change
	// to avoid errant copy everything.
	if !opt.Force {
		msg := "Safety abort: all files were changed on %s %s. Run with --force if desired."
		if !ds1.foundSame {
			fs.Errorf(nil, msg, ds1.msg, quotePath(path1))
		}
		if !ds2.foundSame {
			fs.Errorf(nil, msg, ds2.msg, quotePath(path2))
		}
		if !ds1.foundSame || !ds2.foundSame {
			b.abort = true
			return errors.New("all files were changed")
		}
	}
	// Determine and apply changes to Path1 and Path2
	noChanges := ds1.empty() && ds2.empty()
	changes1 := false // 2to1
	changes2 := false // 1to2
	results2to1 := []Results{}
	results1to2 := []Results{}
	queues := queues{}

	if noChanges {
		fs.Infof(nil, "No changes found")
	} else {
		fs.Infof(nil, "Applying changes")
		changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
		if err != nil {
			b.critical = true
			// b.retryable = true // not sure about this one
			return err
		}
	}
	// Clean up and check listings integrity
	fs.Infof(nil, "Updating listings")
	var err1, err2 error
	b.saveOldListings()
	// save new listings
	if noChanges {
		b.replaceCurrentListings()
	} else {
		if changes1 { // 2to1
			err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
		} else {
			err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
		}
		if changes2 { // 1to2
			err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
		} else {
			err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
		}
	}
	err = err1
	if err == nil {
		err = err2
	}
	if err != nil {
		b.critical = true
		b.retryable = true
		return err
	}

	if !opt.NoCleanup {
		_ = os.Remove(b.newListing1)
		_ = os.Remove(b.newListing2)
	}
	if opt.CheckSync == CheckSyncTrue && !opt.DryRun {
		fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
		if err := b.checkSync(b.listing1, b.listing2); err != nil {
			b.critical = true
			return err
		}
	}

	// Optional rmdirs for empty directories
	if opt.RemoveEmptyDirs {
		fs.Infof(nil, "Removing empty directories")
		fctx = b.setBackupDir(fctx, 1)
		err1 := operations.Rmdirs(fctx, b.fs1, "", true)
		fctx = b.setBackupDir(fctx, 2)
		err2 := operations.Rmdirs(fctx, b.fs2, "", true)
		err := err1
		if err == nil {
			err = err2
		}
		if err != nil {
			b.critical = true
			b.retryable = true
			return err
		}
	}

	return nil
}

// resync implements the --resync mode.
// It will generate path1 and path2 listings
// and copy any unique path2 files to path1.
func (b *bisyncRun) resync(octx, fctx context.Context) error {
	fs.Infof(nil, "Copying unique Path2 files to Path1")
	// Save blank filelists (will be filled from sync results)
	var ls1 = newFileList()
	var ls2 = newFileList()
	err := ls1.save(fctx, b.newListing1)
	if err != nil {
		b.abort = true
	}
	err = ls2.save(fctx, b.newListing2)
	if err != nil {
		b.abort = true
	}
	// Check access health on the Path1 and Path2 filesystems
	// enforce even though this is --resync
	if b.opt.CheckAccess {
		fs.Infof(nil, "Checking access health")

		filesNow1, filesNow2, err := b.findCheckFiles(fctx)
		if err != nil {
			b.critical = true
			b.retryable = true
			return err
		}

		ds1 := &deltaSet{
			checkFiles: bilib.Names{},
		}
		ds2 := &deltaSet{
			checkFiles: bilib.Names{},
		}

		for _, file := range filesNow1.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds1.checkFiles.Add(file)
			}
		}
		for _, file := range filesNow2.list {
			if filepath.Base(file) == b.opt.CheckFilename {
				ds2.checkFiles.Add(file)
			}
		}

		err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
		if err != nil {
			b.critical = true
			b.retryable = true
			return err
		}
	}
	var results2to1 []Results
	var results1to2 []Results
	queues := queues{}

	b.indent("Path2", "Path1", "Resync is copying UNIQUE files to")
	ctxRun := b.opt.setDryRun(fctx)
	// fctx has our extra filters added!
	ctxSync, filterSync := filter.AddConfig(ctxRun)
	// Support files with unknown length, including Google Docs (#5696).
	// Google Docs (Sheets, Slides, etc.) report a size of -1 and no checksum,
	// so they cannot be reliably synced with --checksum or --size-only. As in
	// `rclone sync`, the drive -> non-drive direction is controlled by
	// --drive-export-formats (default "docx,xlsx,pptx,svg") and the reverse
	// by --drive-import-formats (default none); for example, a Google Sheet
	// on the drive side is synced to an .xlsx file on the non-drive side, and
	// an .xlsx file whose name matches an existing Sheet is synced back to
	// that Sheet. To ignore Google Docs entirely, use --drive-skip-gdocs.
	if filterSync.Opt.MinSize == -1 {
		fs.Debugf(nil, "filterSync.Opt.MinSize: %v", filterSync.Opt.MinSize)
	}
	ci := fs.GetConfig(ctxSync)
	ci.IgnoreExisting = true
	ctxSync = b.setBackupDir(ctxSync, 1)
	// 2 to 1
	if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil {
		b.critical = true
		return err
	}

	b.indent("Path1", "Path2", "Resync is copying UNIQUE OR DIFFERING files to")
	ci.IgnoreExisting = false
	ctxSync = b.setBackupDir(ctxSync, 2)
	// 1 to 2
	if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil {
		b.critical = true
		return err
	}
	fs.Infof(nil, "Resync updating listings")
	b.saveOldListings() // may not exist, as this is --resync
	b.replaceCurrentListings()

	// resultsToQueue converts sync results into a queue of names, keeping
	// only successful source-side entries (and directories only when
	// --create-empty-src-dirs is set)
	resultsToQueue := func(results []Results) bilib.Names {
		names := bilib.Names{}
		for _, result := range results {
			if result.Name != "" &&
				(result.Flags != "d" || b.opt.CreateEmptySrcDirs) &&
				result.IsSrc && result.Src != "" &&
				(result.Winner.Err == nil || result.Flags == "d") {
				names.Add(result.Name)
			}
		}
		return names
	}
	// resync 2to1
	queues.copy2to1 = resultsToQueue(results2to1)
	if err = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false); err != nil {
		b.critical = true
		return err
	}

	// resync 1to2
	queues.copy1to2 = resultsToQueue(results1to2)
	if err = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true); err != nil {
		b.critical = true
		return err
	}

	if !b.opt.NoCleanup {
		_ = os.Remove(b.newListing1)
		_ = os.Remove(b.newListing2)
	}
	return nil
}

// checkSync validates listings
func (b *bisyncRun) checkSync(listing1, listing2 string) error {
	files1, err := b.loadListing(listing1)
	if err != nil {
		return fmt.Errorf("cannot read prior listing of Path1: %w", err)
	}
	files2, err := b.loadListing(listing2)
	if err != nil {
		return fmt.Errorf("cannot read prior listing of Path2: %w", err)
	}

	ok := true
	for _, file := range files1.list {
		if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
			b.indent("ERROR", file, "Path1 file not found in Path2")
			ok = false
		}
	}
	for _, file := range files2.list {
		if !files1.has(file) && !files1.has(b.aliases.Alias(file)) {
			b.indent("ERROR", file, "Path2 file not found in Path1")
			ok = false
		}
	}
	if !ok {
		return errors.New("path1 and path2 are out of sync, run --resync to recover")
	}
	return nil
}

// checkAccess validates access health
func (b *bisyncRun) checkAccess(checkFiles1, checkFiles2 bilib.Names) error {
	ok := true
	opt := b.opt
	prefix := "Access test failed:"

	numChecks1 := len(checkFiles1)
	numChecks2 := len(checkFiles2)
	if numChecks1 == 0 || numChecks1 != numChecks2 {
		if numChecks1 == 0 && numChecks2 == 0 {
			fs.Logf("--check-access", Color(terminal.RedFg, "Failed to find any files named %s\n More info: %s"), Color(terminal.CyanFg, opt.CheckFilename), Color(terminal.BlueFg, "https://rclone.org/bisync/#check-access"))
		}
		fs.Errorf(nil, "%s Path1 count %d, Path2 count %d - %s", prefix, numChecks1, numChecks2, opt.CheckFilename)
		ok = false
	}
	for file := range checkFiles1 {
		if !checkFiles2.Has(file) {
			b.indentf("ERROR", file, "%s Path1 file not found in Path2", prefix)
			ok = false
		}
	}
	for file := range checkFiles2 {
		if !checkFiles1.Has(file) {
			b.indentf("ERROR", file, "%s Path2 file not found in Path1", prefix)
			ok = false
		}
	}
	if !ok {
		return errors.New("check file check failed")
	}
	fs.Infof(nil, "Found %d matching %q files on both paths", numChecks1, opt.CheckFilename)
	return nil
}
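
// For example, a user typically seeds the check files on both sides before
// enabling --check-access (an illustrative shell setup, assuming the default
// check filename of RCLONE_TEST):
//
//	rclone touch remote1:path/RCLONE_TEST
//	rclone touch remote2:path/RCLONE_TEST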

// testFn runs the optional test callback, if one was configured
func (b *bisyncRun) testFn() {
	if b.opt.TestFn != nil {
		b.opt.TestFn()
	}
}

// handleErr sets the critical and retryable flags as requested and logs err
// at the appropriate level (Errorf if critical, Infof otherwise)
func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
	if err != nil {
		if retryable {
			b.retryable = true
		}
		if critical {
			b.critical = true
			fs.Errorf(o, "%s: %v", msg, err)
		} else {
			fs.Infof(o, "%s: %v", msg, err)
		}
	}
}
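
// An illustrative call site (hypothetical arguments, for documentation only):
//
//	b.handleErr(b.listing1, "error saving listing", err, true, true)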

// setBackupDir overrides --backup-dir with the path-specific version, if set, in each direction
func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Context {
	ci := fs.GetConfig(ctx)
	ci.BackupDir = b.opt.OrigBackupDir
	if destPath == 1 && b.opt.BackupDir1 != "" {
		ci.BackupDir = b.opt.BackupDir1
	}
	if destPath == 2 && b.opt.BackupDir2 != "" {
		ci.BackupDir = b.opt.BackupDir2
	}
	fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath)
	return ctx
}
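
// An illustrative invocation using the per-path options above (assuming the
// corresponding --backup-dir1 / --backup-dir2 command line flags):
//
//	rclone bisync remote1:path1 remote2:path2 --backup-dir1 remote1:backups --backup-dir2 remote2:backups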