diff --git a/duplicacy/duplicacy_main.go b/duplicacy/duplicacy_main.go
index 88cad5db..9f575bb6 100644
--- a/duplicacy/duplicacy_main.go
+++ b/duplicacy/duplicacy_main.go
@@ -1019,7 +1019,6 @@ func printFile(context *cli.Context) {
 		snapshotID = context.String("id")
 	}
 
-
 	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
 	duplicacy.SavePassword(*preference, "password", password)
 
@@ -1266,7 +1265,7 @@ func copySnapshots(context *cli.Context) {
 	destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
 
 	destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
-		destinationPassword, "", "", false)
+		destinationPassword, "", "", false)
 	duplicacy.SavePassword(*destination, "password", destinationPassword)
 
 	destinationManager.SetupSnapshotCache(destination.Name)
@@ -1391,7 +1390,7 @@ func benchmark(context *cli.Context) {
 	if storage == nil {
 		return
 	}
-	duplicacy.Benchmark(repository, storage, int64(fileSize) * 1024 * 1024, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
+	duplicacy.Benchmark(repository, storage, int64(fileSize)*1024*1024, chunkSize*1024*1024, chunkCount, uploadThreads, downloadThreads)
 }
 
 func main() {
@@ -1569,7 +1568,7 @@ func main() {
 			cli.BoolFlag{
 				Name:  "persist",
 				Usage: "continue processing despite chunk errors or existing files (without -overwrite), reporting any affected files",
-			},
+			},
 			cli.StringFlag{
 				Name:  "key-passphrase",
 				Usage: "the passphrase to decrypt the RSA private key",
@@ -2180,8 +2179,8 @@ func main() {
 				Usage: "add a comment to identify the process",
 			},
 			cli.StringSliceFlag{
-				Name:  "suppress, s",
-				Usage: "suppress logs with the specified id",
+				Name:     "suppress, s",
+				Usage:    "suppress logs with the specified id",
 				Argument: "",
 			},
 			cli.BoolFlag{
diff --git a/src/duplicacy_b2client.go b/src/duplicacy_b2client.go
index aac2d80f..54dec4b7 100644
--- a/src/duplicacy_b2client.go
+++ b/src/duplicacy_b2client.go
@@ -5,22 +5,22 @@
 package duplicacy
 
 import (
-	"io"
-	"os"
-	"fmt"
 	"bytes"
-	"time"
-	"sync"
-	"strconv"
-	"strings"
-	"net/url"
-	"net/http"
-	"math/rand"
-	"io/ioutil"
 	"crypto/sha1"
+	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
-	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
 )
 
 type B2Error struct {
@@ -41,14 +41,14 @@ type B2UploadArgument struct {
 var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account"
 
 type B2Client struct {
-	HTTPClient *http.Client
+	HTTPClient *http.Client
 
-	AccountID string
-	ApplicationKeyID string
-	ApplicationKey string
-	BucketName string
-	BucketID string
-	StorageDir string
+	AccountID        string
+	ApplicationKeyID string
+	ApplicationKey   string
+	BucketName       string
+	BucketID         string
+	StorageDir       string
 
 	Lock               sync.Mutex
 	AuthorizationToken string
@@ -56,12 +56,12 @@ type B2Client struct {
 	DownloadURL        string
 	IsAuthorized       bool
 
-	UploadURLs []string
-	UploadTokens []string
+	UploadURLs   []string
+	UploadTokens []string
 
-	Threads int
-	MaximumRetries int
-	TestMode bool
+	Threads        int
+	MaximumRetries int
+	TestMode       bool
 
 	LastAuthorizationTime int64
 }
@@ -81,7 +81,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, downloadURL str
 		storageDir = storageDir[1:]
 	}
 
-	if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
+	if storageDir != "" && storageDir[len(storageDir)-1] != '/' {
 		storageDir += "/"
 	}
 
@@ -128,7 +128,7 @@ func (client *B2Client) retry(retries int, response *http.Response) int {
 		}
 	}
 
-	if retries >= client.MaximumRetries + 1 {
+	if retries >= client.MaximumRetries+1 {
 		return 0
 	}
 	retries++
@@ -143,7 +143,7 @@ func (client *B2Client) retry(retries int, response *http.Response) int {
 }
 
 func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
-	io.ReadCloser, http.Header, int64, error) {
+	io.ReadCloser, http.Header, int64, error) {
 
 	var response *http.Response
 
@@ -171,7 +171,6 @@ func (client *B2Client) call(threadIndex int, requestURL string, method string,
 		inputReader = rateLimitedReader
 	}
 
-
 	if isUpload {
 		if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
 			err := client.getUploadURL(threadIndex)
@@ -303,7 +302,7 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
 	defer client.Lock.Unlock()
 
 	// Don't authorize if the previous one was done less than 30 seconds ago
-	if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix() - 30 {
+	if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix()-30 {
 		return nil, false
 	}
 
@@ -426,7 +425,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
 		apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
 	} else if singleFile {
 		// handle a single file with no versions as a special case to download the last byte of the file
-		apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
+		apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+startFileName)
 		// requesting byte -1 works for empty files where 0-0 fails with a 416 error
 		requestHeaders["Range"] = "bytes=-1"
 		// HEAD request
@@ -584,7 +583,7 @@ func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID strin
 
 func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
 
-	url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
+	url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+filePath)
 
 	readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
 	return readCloser, len, err
diff --git a/src/duplicacy_backupmanager.go b/src/duplicacy_backupmanager.go
index d4f4c4df..1eac2443 100644
--- a/src/duplicacy_backupmanager.go
+++ b/src/duplicacy_backupmanager.go
@@ -36,9 +36,9 @@ type BackupManager struct {
 
 	nobackupFile string // don't backup directory when this file name is found
 
-	filtersFile string // the path to the filters file
+	filtersFile string // the path to the filters file
 
-	excludeByAttribute bool // don't backup file based on file attribute
+	excludeByAttribute bool // don't backup file based on file attribute
 }
 
@@ -117,7 +116,6 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
 	return true
 }
 
-
 // setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
 // to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
 // original unchanged entry list.
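
The comment above describes how re-chunked entries are stitched back onto an existing entry list. As a minimal sketch of that offset idea (the `StartChunk`/`EndChunk` field names follow the comment; the helper itself is hypothetical, not the patch's code):

```go
// entry stands in for duplicacy's Entry; StartChunk and EndChunk index into
// the snapshot's chunk sequence.
type entry struct {
	StartChunk, EndChunk int
}

// appendEntries shifts the chunk indices of each entry by 'offset' so they
// stay valid once 'entries' is appended to a list that already references
// 'offset' chunks. Illustrative only.
func appendEntries(existing []entry, entries []entry, offset int) []entry {
	for i := range entries {
		entries[i].StartChunk += offset
		entries[i].EndChunk += offset
	}
	return append(existing, entries...)
}
```
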
@@ -193,7 +192,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 
 	if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
 		LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
-			manager.config.DataShards, manager.config.ParityShards)
+			manager.config.DataShards, manager.config.ParityShards)
 	}
 
 	if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
@@ -217,7 +216,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 
 	LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
 	localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
-		manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute)
+		manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute)
 	if err != nil {
 		LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
 		return false
@@ -800,7 +799,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
 
 	localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
-		manager.filtersFile, manager.excludeByAttribute)
+		manager.filtersFile, manager.excludeByAttribute)
 	if err != nil {
 		LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
 		return 0
@@ -833,7 +832,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	var failedFiles int
 	var skippedFileSize int64
 	var skippedFiles int64
-	
+
 	var downloadedFiles []*Entry
 
 	i := 0
@@ -1202,8 +1201,8 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 // Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
 // file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
 // overwritten directly.
-// Return: true, nil: Restored file;
-//         false, nil: Skipped file;
+// Return: true, nil: Restored file;
+//         false, nil: Skipped file;
 //         false, error: Failure to restore file (only if allowFailures == true)
 func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string,
 	inPlace bool, overwrite bool, showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {
@@ -1379,7 +1378,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 	// fileHash != entry.Hash, warn/error depending on -overwrite option
 	if !overwrite && !isNewFile {
 		LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
-			"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
+			"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
 		return false, fmt.Errorf("file exists")
 	}
 
@@ -1625,7 +1624,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 
 	if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 {
 		LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards",
-			otherManager.config.DataShards, otherManager.config.ParityShards)
+			otherManager.config.DataShards, otherManager.config.ParityShards)
 	}
 
 	if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 {
@@ -1712,7 +1711,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 	}
 
 	// These two maps store hashes of chunks in the source and destination storages, respectively. Note that
-	// the value of 'chunks' is used to indicated if the chunk is a snapshot chunk, while the value of 'otherChunks'
+	// the value of 'chunks' is used to indicated if the chunk is a snapshot chunk, while the value of 'otherChunks'
 	// is not used.
 	chunks := make(map[string]bool)
 	otherChunks := make(map[string]bool)
@@ -1726,15 +1725,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 		LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
 
 		for _, chunkHash := range snapshot.FileSequence {
-			chunks[chunkHash] = true // The chunk is a snapshot chunk
+			chunks[chunkHash] = true // The chunk is a snapshot chunk
 		}
 
 		for _, chunkHash := range snapshot.ChunkSequence {
-			chunks[chunkHash] = true // The chunk is a snapshot chunk
+			chunks[chunkHash] = true // The chunk is a snapshot chunk
 		}
 
 		for _, chunkHash := range snapshot.LengthSequence {
-			chunks[chunkHash] = true // The chunk is a snapshot chunk
+			chunks[chunkHash] = true // The chunk is a snapshot chunk
 		}
 
 		description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1747,7 +1746,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 
 		for _, chunkHash := range snapshot.ChunkHashes {
 			if _, found := chunks[chunkHash]; !found {
-				chunks[chunkHash] = false // The chunk is a file chunk
+				chunks[chunkHash] = false // The chunk is a file chunk
 			}
 		}
 
@@ -1779,7 +1778,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 		}
 	}
 
-	LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))
+	LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks)-len(chunksToCopy), len(chunks))
 
 	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, downloadingThreads, false)
 
@@ -1799,11 +1798,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 
 		elapsedTime := time.Now().Sub(startTime).Seconds()
 		speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime)
-		remainingTime := int64(float64(len(chunksToCopy) - chunkIndex - 1) / float64(chunkIndex + 1) * elapsedTime)
-		percentage := float64(chunkIndex + 1) / float64(len(chunksToCopy)) * 100.0
+		remainingTime := int64(float64(len(chunksToCopy)-chunkIndex-1) / float64(chunkIndex+1) * elapsedTime)
+		percentage := float64(chunkIndex+1) / float64(len(chunksToCopy)) * 100.0
 		LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%",
-			action, chunk.GetID(), chunkIndex + 1, len(chunksToCopy),
-			PrettySize(speed), PrettyTime(remainingTime), percentage)
+			action, chunk.GetID(), chunkIndex+1, len(chunksToCopy),
+			PrettySize(speed), PrettyTime(remainingTime), percentage)
 		otherManager.config.PutChunk(chunk)
 	})
@@ -1827,7 +1826,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 	chunkDownloader.Stop()
 	chunkUploader.Stop()
 
-	LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks) - copiedChunks)
+	LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks)-copiedChunks)
 
 	for _, snapshot := range snapshots {
 		if revisionMap[snapshot.ID][snapshot.Revision] == false {
diff --git a/src/duplicacy_backupmanager_test.go b/src/duplicacy_backupmanager_test.go
index 988bf43f..c6694a67 100644
--- a/src/duplicacy_backupmanager_test.go
+++ b/src/duplicacy_backupmanager_test.go
@@ -477,9 +477,9 @@ func TestPersistRestore(t *testing.T) {
 	maxFileSize := 1000000
 	//maxFileSize := 200000
 
-	createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize,1)
-	createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize,2)
-	createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize,3)
+	createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize, 1)
+	createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize, 2)
+	createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize, 3)
 
 	threads := 1
 
@@ -536,7 +536,6 @@ func TestPersistRestore(t *testing.T) {
 	unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
 	time.Sleep(time.Duration(delay) * time.Second)
 
-
 	// do encrypted backup
 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
 	encBackupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
@@ -546,7 +545,6 @@ func TestPersistRestore(t *testing.T) {
 	encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
 	time.Sleep(time.Duration(delay) * time.Second)
 
-
 	// check snapshots
 	unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
 		/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
@@ -554,60 +552,60 @@ func TestPersistRestore(t *testing.T) {
 	encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
 		/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
-		/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
-	
+		/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
+
 	// check functions
 	checkAllUncorrupted := func(cmpRepository string) {
-		for _, f := range []string{"file1", "file2", "dir1/file3"} {
-			if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
-				t.Errorf("File %s does not exist", f)
-				continue
-			}
+		for _, f := range []string{"file1", "file2", "dir1/file3"} {
+			if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
+				t.Errorf("File %s does not exist", f)
+				continue
+			}
 
-			hash1 := getFileHash(testDir + "/repository1/" + f)
-			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
-			if hash1 != hash2 {
-				t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
-			}
+			hash1 := getFileHash(testDir + "/repository1/" + f)
+			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+			if hash1 != hash2 {
+				t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+			}
 		}
 	}
 
 	checkMissingFile := func(cmpRepository string, expectMissing string) {
-		for _, f := range []string{"file1", "file2", "dir1/file3"} {
-			_, err := os.Stat(testDir + cmpRepository + "/" + f)
-			if err==nil {
-				if f==expectMissing {
-					t.Errorf("File %s exists, expected to be missing", f)
-				}
-				continue
-			}
-			if os.IsNotExist(err) {
-				if f!=expectMissing {
-					t.Errorf("File %s does not exist", f)
-				}
-				continue
-			}
+		for _, f := range []string{"file1", "file2", "dir1/file3"} {
+			_, err := os.Stat(testDir + cmpRepository + "/" + f)
+			if err == nil {
+				if f == expectMissing {
+					t.Errorf("File %s exists, expected to be missing", f)
+				}
+				continue
+			}
+			if os.IsNotExist(err) {
+				if f != expectMissing {
+					t.Errorf("File %s does not exist", f)
+				}
+				continue
+			}
 
-			hash1 := getFileHash(testDir + "/repository1/" + f)
-			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
-			if hash1 != hash2 {
-				t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
-			}
+			hash1 := getFileHash(testDir + "/repository1/" + f)
+			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+			if hash1 != hash2 {
+				t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+			}
 		}
 	}
 
-	checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
+	checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
 		for _, f := range []string{"file1", "file2", "dir1/file3"} {
 			if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
 				t.Errorf("File %s does not exist", f)
 				continue
 			}
-			
+
 			hash1 := getFileHash(testDir + "/repository1/" + f)
 			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
-			if (f==expectCorrupted) {
+			if f == expectCorrupted {
 				if hash1 == hash2 {
 					t.Errorf("File %s has same hashes, expected to be corrupted: %s vs %s", f, hash1, hash2)
 				}
-				
+
 			} else {
 				if hash1 != hash2 {
 					t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
@@ -619,28 +617,28 @@ func TestPersistRestore(t *testing.T) {
 	// test restore all uncorrupted to repository3
 	SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
 	failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
-		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
 	assertRestoreFailures(t, failedFiles, 0)
 	checkAllUncorrupted("/repository3")
 
 	// test for corrupt files and -persist
-	// corrupt a chunk
+	// corrupt a chunk
 	chunkToCorrupt1 := "/4d/538e5dfd2b08e782bfeb56d1360fb5d7eb9d8c4b2531cc2fca79efbaec910c"
-	// this should affect file1
+	// this should affect file1
 	chunkToCorrupt2 := "/2b/f953a766d0196ce026ae259e76e3c186a0e4bcd3ce10f1571d17f86f0a5497"
-	// this should affect dir1/file3
-	
+	// this should affect dir1/file3
+
 	for i := 0; i < 2; i++ {
-		if i==0 {
+		if i == 0 {
 			// test corrupt chunks
 			corruptFile(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1, 128, 128, 4)
 			corruptFile(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2, 128, 128, 4)
 		} else {
 			// test missing chunks
-			os.Remove(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1)
-			os.Remove(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2)
+			os.Remove(testDir + "/unenc_storage" + "/chunks" + chunkToCorrupt1)
+			os.Remove(testDir + "/enc_storage" + "/chunks" + chunkToCorrupt2)
 		}
-		
+
 		// check snapshots with --persist (allowFailures == true)
 		// this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
 		unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
@@ -651,9 +649,8 @@ func TestPersistRestore(t *testing.T) {
 			/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
 			/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
 
-
 		// test restore corrupted, inPlace = true, corrupted files will have hash failures
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
 		failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
@@ -662,8 +659,7 @@ func TestPersistRestore(t *testing.T) {
 		// check restore, expect file1 to be corrupted
 		checkCorruptedFile("/repository2", "file1")
 
-
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
 		failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
@@ -674,7 +670,7 @@ func TestPersistRestore(t *testing.T) {
 		//SetLoggingLevel(DEBUG)
 
 		// test restore corrupted, inPlace = false, corrupted files will be missing
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
 		failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
 			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
@@ -683,8 +679,7 @@ func TestPersistRestore(t *testing.T) {
 		// check restore, expect file1 to be corrupted
 		checkMissingFile("/repository2", "file1")
 
-
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
 		failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
 			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
@@ -696,7 +691,7 @@ func TestPersistRestore(t *testing.T) {
 		// test restore corrupted files from different backups, inPlace = true
 		// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
 		// the latter will not touch the existing file3 with correct hash
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
 		assertRestoreFailures(t, failedFiles, 1)
@@ -720,4 +715,4 @@ func TestPersistRestore(t *testing.T) {
 		checkAllUncorrupted("/repository3")
 	}
 
-}
\ No newline at end of file
+}
diff --git a/src/duplicacy_chunk.go b/src/duplicacy_chunk.go
index 860036f0..1af195b6 100644
--- a/src/duplicacy_chunk.go
+++ b/src/duplicacy_chunk.go
@@ -8,13 +8,13 @@ import (
 	"bytes"
 	"compress/zlib"
 	"crypto/aes"
-	"crypto/rsa"
 	"crypto/cipher"
 	"crypto/hmac"
 	"crypto/rand"
+	"crypto/rsa"
 	"crypto/sha256"
-	"encoding/hex"
 	"encoding/binary"
+	"encoding/hex"
 	"fmt"
 	"hash"
 	"io"
@@ -22,8 +22,8 @@ import (
 	"runtime"
 
 	"github.com/bkaradzic/go-lz4"
-	"github.com/minio/highwayhash"
 	"github.com/klauspost/reedsolomon"
+	"github.com/minio/highwayhash"
 )
 
 // A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
@@ -66,8 +66,8 @@ type Chunk struct {
 	// by the config
 
 	isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
-	// encryption, where a snapshot chunk is not encrypted by RSA
-	
+	// encryption, where a snapshot chunk is not encrypted by RSA
+
 	isBroken bool // Indicates the chunk did not download correctly. This is only used for -persist (allowFailures) mode
 }
@@ -231,7 +231,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
 	// Start with the magic number and the version number.
 	if usingRSA {
 		// RSA encryption starts "duplicacy\002"
-		encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
+		encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER)-1])
 		encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
 
 		// Then the encrypted key
@@ -310,13 +310,13 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
 		chunkSize := len(encryptedBuffer.Bytes())
 		shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
 		// Append zeros to make the last shard to have the same size as other
-		encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
+		encryptedBuffer.Write(make([]byte, shardSize*chunk.config.DataShards-chunkSize))
 		// Grow the buffer for parity shards
 		encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
 		// Now create one slice for each shard, reusing the data in the buffer
-		data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
-		for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
-			data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
+		data := make([][]byte, chunk.config.DataShards+chunk.config.ParityShards)
+		for i := 0; i < chunk.config.DataShards+chunk.config.ParityShards; i++ {
+			data[i] = encryptedBuffer.Bytes()[i*shardSize : (i+1)*shardSize]
 		}
 		// This populates the parity shard
 		encoder.Encode(data)
@@ -387,13 +387,13 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 
 	if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {
 		// The chunk was encoded with erasure coding
-		if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
+		if len(encryptedBuffer.Bytes()) < bannerLength+14 {
 			return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
 		}
 		// Check the header checksum
-		header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
-		if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
-			header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
+		header := encryptedBuffer.Bytes()[bannerLength : bannerLength+14]
+		if header[12] != header[0]^header[2]^header[4]^header[6]^header[8]^header[10] ||
+			header[13] != header[1]^header[3]^header[5]^header[7]^header[9]^header[11] {
 			return fmt.Errorf("Erasure coding header corrupted (%x)", header)
 		}
 
@@ -403,9 +403,9 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 		parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
 		shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
 		// This is the length the chunk file should have
-		expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
+		expectedLength := bannerLength + 2*len(header) + (dataShards+parityShards)*(shardSize+32)
 		// The minimum length that can be recovered from
-		minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
+		minimumLength := bannerLength + len(header) + (dataShards+parityShards)*32 + dataShards*shardSize
 		LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
 		if len(encryptedBuffer.Bytes()) > expectedLength {
 			LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
@@ -420,15 +420,15 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 		// Where the hashes start
 		hashOffset := bannerLength + len(header)
 		// Where the data start
-		dataOffset := hashOffset + (dataShards + parityShards) * 32
+		dataOffset := hashOffset + (dataShards+parityShards)*32
 
-		data := make([][]byte, dataShards + parityShards)
+		data := make([][]byte, dataShards+parityShards)
 		recoveryNeeded := false
 		hashKey := make([]byte, 32)
 		availableShards := 0
-		for i := 0; i < dataShards + parityShards; i++ {
-			start := dataOffset + i * shardSize
-			if start + shardSize > len(encryptedBuffer.Bytes()) {
+		for i := 0; i < dataShards+parityShards; i++ {
+			start := dataOffset + i*shardSize
+			if start+shardSize > len(encryptedBuffer.Bytes()) {
 				// the current shard is incomplete
 				break
 			}
@@ -437,17 +437,17 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			if err != nil {
 				return err
 			}
-			_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
+			_, err = hasher.Write(encryptedBuffer.Bytes()[start : start+shardSize])
 			if err != nil {
 				return err
 			}
 
-			if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
+			if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset+i*32:hashOffset+(i+1)*32]) != 0 {
 				if i < dataShards {
 					recoveryNeeded = true
 				}
 			} else {
 				// The shard is good
-				data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
+				data[i] = encryptedBuffer.Bytes()[start : start+shardSize]
 				availableShards++
 				if availableShards >= dataShards {
 					// We have enough shards to recover; skip the remaining shards
@@ -463,7 +463,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
 		} else {
 			if availableShards < dataShards {
-				return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
+				return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards+parityShards)
 			}
 
 			// Show the validity of shards using a string of * and -
@@ -515,7 +515,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 		key = hasher.Sum(nil)
 	}
 
-	if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
+	if len(encryptedBuffer.Bytes()) < bannerLength+12 {
 		return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
 	}
 
@@ -534,13 +534,13 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			return fmt.Errorf("An RSA private key is required to decrypt the chunk")
 		}
 
-		encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
+		encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength : bannerLength+2])
 
-		if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
+		if len(encryptedBuffer.Bytes()) < bannerLength+14+int(encryptedKeyLength) {
 			return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
 		}
 
-		encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
+		encryptedKey := encryptedBuffer.Bytes()[bannerLength+2 : bannerLength+2+int(encryptedKeyLength)]
 		bannerLength += 2 + int(encryptedKeyLength)
 
 		decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
diff --git a/src/duplicacy_chunk_test.go b/src/duplicacy_chunk_test.go
index dea0605f..81eef264 100644
--- a/src/duplicacy_chunk_test.go
+++ b/src/duplicacy_chunk_test.go
@@ -21,7 +21,7 @@ func TestErasureCoding(t *testing.T) {
 	config.MinimumChunkSize = 100
 	config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
 	config.DataShards = 5
-	config.ParityShards = 2
+	config.ParityShards = 2
 
 	chunk := CreateChunk(config, true)
 	chunk.Reset(true)
@@ -99,13 +99,13 @@ func TestChunkBasic(t *testing.T) {
 		copy(encryptedData, chunk.GetBytes())
 
 		if testErasureCoding {
-			offset := 24 + 32 * 7
-			start := rand.Int() % (len(encryptedData) - offset) + offset
+			offset := 24 + 32*7
+			start := rand.Int()%(len(encryptedData)-offset) + offset
 			length := (len(encryptedData) - offset) / 7
-			if start + length > len(encryptedData) {
+			if start+length > len(encryptedData) {
 				length = len(encryptedData) - start
 			}
-			crypto_rand.Read(encryptedData[start: start+length])
+			crypto_rand.Read(encryptedData[start : start+length])
 		}
 
 		chunk.Reset(false)
diff --git a/src/duplicacy_chunkdownloader.go b/src/duplicacy_chunkdownloader.go
index b4c05790..3cc8f8d5 100644
--- a/src/duplicacy_chunkdownloader.go
+++ b/src/duplicacy_chunkdownloader.go
@@ -31,8 +31,8 @@ type ChunkDownloadCompletion struct {
 // corresponding ChunkDownloadTask is sent to the dowloading goroutine. Once a chunk is downloaded, it will be
 // inserted in the completed task list.
 type ChunkDownloader struct {
-	totalChunkSize int64 // Total chunk size
-	downloadedChunkSize int64 // Downloaded chunk size
+	totalChunkSize      int64 // Total chunk size
+	downloadedChunkSize int64 // Downloaded chunk size
 
 	config *Config // Associated config
 	storage Storage // Download from this storage
@@ -54,7 +54,7 @@ type ChunkDownloader struct {
 	numberOfDownloadingChunks int // The number of chunks still being downloaded
 	numberOfActiveChunks      int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
 
-	NumberOfFailedChunks int // The number of chunks that can't be downloaded
+	NumberOfFailedChunks int // The number of chunks that can't be downloaded
 }
 
 func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkDownloader {
@@ -276,7 +276,7 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
 	}
 
 	// Looping until there isn't a download task in progress
-	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex + 1 < len(downloader.taskList) {
+	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex+1 < len(downloader.taskList) {
 
 		// Wait for a completion event first
 		if downloader.numberOfActiveChunks > 0 {
@@ -291,8 +291,8 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
 		}
 
 		// Pass the tasks one by one to the download queue
-		if downloader.lastChunkIndex + 1 < len(downloader.taskList) {
-			task := &downloader.taskList[downloader.lastChunkIndex + 1]
+		if downloader.lastChunkIndex+1 < len(downloader.taskList) {
+			task := &downloader.taskList[downloader.lastChunkIndex+1]
 			if task.isDownloading {
 				downloader.lastChunkIndex++
 				continue
@@ -317,7 +317,7 @@ func (downloader *ChunkDownloader) Stop() {
 		if completion.chunk.isBroken {
 			downloader.NumberOfFailedChunks++
 		}
-}
+	}
 
 	for i := range downloader.completedTasks {
 		downloader.config.PutChunk(downloader.taskList[i].chunk)
@@ -420,7 +420,7 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			completeFailedChunk(chunk)
 			// A chunk is not found. This is a serious error and hopefully it will never happen.
 			if err != nil {
-				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
 			} else {
 				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
 			}
diff --git a/src/duplicacy_config.go b/src/duplicacy_config.go
index 568aa774..ce304220 100644
--- a/src/duplicacy_config.go
+++ b/src/duplicacy_config.go
@@ -8,8 +8,8 @@ import (
 	"bytes"
 	"crypto/hmac"
 	"crypto/rand"
-	"crypto/sha256"
 	"crypto/rsa"
+	"crypto/sha256"
 	"crypto/x509"
 	"encoding/binary"
 	"encoding/hex"
@@ -17,13 +17,13 @@ import (
 	"encoding/pem"
 	"fmt"
 	"hash"
+	"io/ioutil"
 	"os"
-	"strings"
+	"reflect"
 	"runtime"
 	"runtime/debug"
+	"strings"
 	"sync/atomic"
-	"io/ioutil"
-	"reflect"
 
 	blake2 "github.com/minio/blake2b-simd"
 )
@@ -72,12 +72,12 @@ type Config struct {
 	FileKey []byte `json:"-"`
 
 	// for erasure coding
-	DataShards int `json:'data-shards'`
-	ParityShards int `json:'parity-shards'`
+	DataShards   int `json:"data-shards"`
+	ParityShards int `json:"parity-shards"`
 
 	// for RSA encryption
 	rsaPrivateKey *rsa.PrivateKey
-	rsaPublicKey *rsa.PublicKey
+	rsaPublicKey  *rsa.PublicKey
 
 	chunkPool      chan *Chunk
 	numberOfChunks int32
@@ -89,17 +89,17 @@ type aliasedConfig Config
 type jsonableConfig struct {
 	*aliasedConfig
 
-	ChunkSeed string `json:"chunk-seed"`
-	HashKey   string `json:"hash-key"`
-	IDKey     string `json:"id-key"`
-	ChunkKey  string `json:"chunk-key"`
-	FileKey   string `json:"file-key"`
+	ChunkSeed    string `json:"chunk-seed"`
+	HashKey      string `json:"hash-key"`
+	IDKey        string `json:"id-key"`
+	ChunkKey     string `json:"chunk-key"`
+	FileKey      string `json:"file-key"`
 	RSAPublicKey string `json:"rsa-public-key"`
 }
 
 func (config *Config) MarshalJSON() ([]byte, error) {
 
-	publicKey := []byte {}
+	publicKey := []byte{}
 	if config.rsaPublicKey != nil {
 		publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
 	}
diff --git a/src/duplicacy_entry_test.go b/src/duplicacy_entry_test.go
index 57d76346..f3da5463 100644
--- a/src/duplicacy_entry_test.go
+++ b/src/duplicacy_entry_test.go
@@ -2,6 +2,8 @@
 // Free for personal use and commercial trial
 // Commercial use requires per-user licenses available from https://duplicacy.com
 
+//go:build linux || darwin || freebsd
+
 package duplicacy
 
 import (
diff --git a/src/duplicacy_filefabricstorage.go b/src/duplicacy_filefabricstorage.go
index a9574afd..6987f9dd 100644
--- a/src/duplicacy_filefabricstorage.go
+++ b/src/duplicacy_filefabricstorage.go
@@ -1,5 +1,5 @@
 // Copyright (c) Storage Made Easy. All rights reserved.
-//
+//
 // This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
 // Duplicacy and its derivative works.
 //
@@ -7,45 +7,45 @@
 package duplicacy
 
 import (
-	"io"
-	"fmt"
-	"time"
-	"sync"
 	"bytes"
+	"encoding/xml"
 	"errors"
-	"strings"
-	"net/url"
-	"net/http"
-	"math/rand"
+	"fmt"
+	"io"
 	"io/ioutil"
-	"encoding/xml"
-	"path/filepath"
+	"math/rand"
 	"mime/multipart"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
 )
 
 // The XML element representing a file returned by the File Fabric server
 type FileFabricFile struct {
-	XMLName xml.Name
-	ID string `xml:"fi_id"`
-	Path string `xml:"path"`
-	Size int64 `xml:"fi_size"`
-	Type int `xml:"fi_type"`
+	XMLName xml.Name
+	ID      string `xml:"fi_id"`
+	Path    string `xml:"path"`
+	Size    int64  `xml:"fi_size"`
+	Type    int    `xml:"fi_type"`
 }
 
 // The XML element representing a file list returned by the server
 type FileFabricFileList struct {
-	XMLName xml.Name `xml:"files"`
-	Files []FileFabricFile `xml:",any"`
+	XMLName xml.Name         `xml:"files"`
+	Files   []FileFabricFile `xml:",any"`
 }
 
 type FileFabricStorage struct {
 	StorageBase
 
-	endpoint string // the server
-	authToken string // the authentication token
-	accessToken string // the access token (as returned by getTokenByAuthToken)
-	storageDir string // the path of the storage directory
-	storageDirID string // the id of 'storageDir'
+	endpoint     string // the server
+	authToken    string // the authentication token
+	accessToken  string // the access token (as returned by getTokenByAuthToken)
+	storageDir   string // the path of the storage directory
+	storageDirID string // the id of 'storageDir'
 
 	client *http.Client // the default http client
 	threads int // number of threads
@@ -53,18 +53,18 @@ type FileFabricStorage struct {
 	directoryCache map[string]string // stores ids for directories known to this backend
 	directoryCacheLock sync.Mutex // lock for accessing directoryCache
 
-	isAuthorized bool
-	testMode bool
+	isAuthorized bool
+	testMode     bool
 }
 
 var (
 	errFileFabricAuthorizationFailure = errors.New("Authentication failure")
-	errFileFabricDirectoryExists = errors.New("Directory exists")
+	errFileFabricDirectoryExists      = errors.New("Directory exists")
 )
 
 // The general server response
 type FileFabricResponse struct {
-	Status string `xml:"status"`
+	Status  string `xml:"status"`
 	Message string `xml:"statusmessage"`
 }
 
@@ -128,26 +128,26 @@ func CreateFileFabricStorage(endpoint string, token string, storageDir string, t
 }
 
 // Retrieve the access token using an auth token
-func (storage *FileFabricStorage) getAccessToken() (error) {
+func (storage *FileFabricStorage) getAccessToken() error {
 
-	formData := url.Values { "authtoken": {storage.authToken},}
+	formData := url.Values{"authtoken": {storage.authToken}}
 	readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
 	if err != nil {
 		return err
 	}
 
 	defer readCloser.Close()
-	defer io.Copy(ioutil.Discard, readCloser)
+	defer io.Copy(ioutil.Discard, readCloser)
 
-	var output struct {
+	var output struct {
 		FileFabricResponse
-		Token string `xml:"token"`
-	}
+		Token string `xml:"token"`
+	}
 
-	err = xml.NewDecoder(readCloser).Decode(&output)
-	if err != nil {
-		return err
-	}
+	err = xml.NewDecoder(readCloser).Decode(&output)
+	if err != nil {
+		return err
+	}
 
 	err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
 	if err != nil {
@@ -155,7 +155,7 @@ func (storage *FileFabricStorage) getAccessToken() (error) {
 	}
 
 	storage.accessToken = output.Token
-	return nil
+	return nil
 }
 
 // Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff
@@ -171,13 +171,13 @@ func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, m
 		backoff = 60
 	}
 	delay := rand.Intn(backoff*500) + backoff*500
-	LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay) / 1000.0)
+	LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay)/1000.0)
 	time.Sleep(time.Duration(delay) * time.Millisecond)
 	return true
 }
 
 // Send a request to the server
-func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) ( io.ReadCloser, http.Header, int64, error) {
+func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
 
 	var response *http.Response
 
@@ -229,13 +229,13 @@ func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, re
 		}
 
 		defer response.Body.Close()
-		defer io.Copy(ioutil.Discard, response.Body)
+		defer io.Copy(ioutil.Discard, response.Body)
 
 		var output struct {
-			Status string `xml:"status"`
+			Status  string `xml:"status"`
 			Message string `xml:"statusmessage"`
 		}
-		
+
 		err = xml.NewDecoder(response.Body).Decode(&output)
 		if err != nil {
 			if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) {
@@ -279,7 +279,7 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files
 	lastID := ""
 	for {
-		formData := url.Values { "marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid" : {dirID}}
+		formData := url.Values{"marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid": {dirID}}
 		if dir == "snapshots/" {
 			formData["includefolders"] = []string{"y"}
 		}
@@ -293,12 +293,12 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files
 		}
 
 		defer readCloser.Close()
-		defer io.Copy(ioutil.Discard, readCloser)
+		defer io.Copy(ioutil.Discard, readCloser)
 
 		var output struct {
 			FileFabricResponse
-			FileList FileFabricFileList `xml:"files"`
-			Truncated int `xml:"truncated"`
+			FileList  FileFabricFileList `xml:"files"`
+			Truncated int                `xml:"truncated"`
 		}
 
 		err = xml.NewDecoder(readCloser).Decode(&output)
@@ -314,7 +314,7 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files
 		if dir == "snapshots/" {
 			for _, file := range output.FileList.Files {
 				if file.Type == 1 {
-					files = append(files, file.Path + "/")
+					files = append(files, file.Path+"/")
 				}
 				lastID = file.ID
 			}
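
The shouldRetry hunk above implements jittered exponential backoff: the delay is drawn from [backoff*500, backoff*1000) milliseconds, i.e. between 0.5x and 1x the current backoff in seconds. A minimal sketch of that formula in isolation (the doubling schedule and attempt loop are illustrative; the 60-second cap and the delay expression follow the code above):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay reproduces the delay computed in shouldRetry: an exponentially
// growing backoff, capped at 60 seconds, with 50% jitter.
func retryDelay(retry int) time.Duration {
	backoff := 1 << retry // 1, 2, 4, 8, ... seconds
	if backoff > 60 {
		backoff = 60
	}
	delay := rand.Intn(backoff*500) + backoff*500 // milliseconds
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for retry := 0; retry < 5; retry++ {
		fmt.Printf("attempt %d: sleep %v\n", retry, retryDelay(retry))
	}
}
```
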
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) { - formData := url.Values { "path" : {storage.storageDir + filePath}} + formData := url.Values{"path": {storage.storageDir + filePath}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData) if err != nil { @@ -346,12 +346,12 @@ func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(ioutil.Discard, readCloser) var output struct { FileFabricResponse - File FileFabricFile `xml:"file"` - Exists string `xml:"exists"` + File FileFabricFile `xml:"file"` + Exists string `xml:"exists"` } err = xml.NewDecoder(readCloser).Decode(&output) @@ -371,7 +371,7 @@ func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) for filePath != "" && filePath[len(filePath)-1] == '/' { filePath = filePath[:len(filePath)-1] } - + storage.directoryCacheLock.Lock() storage.directoryCache[filePath] = output.File.ID storage.directoryCacheLock.Unlock() @@ -396,7 +396,7 @@ func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) ( return nil } - formData := url.Values { "fi_id" : {fileID}} + formData := url.Values{"fi_id": {fileID}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData) if err != nil { @@ -404,7 +404,7 @@ func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) ( } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(ioutil.Discard, readCloser) var output FileFabricResponse @@ -428,7 +428,7 @@ func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to stri return nil } - formData := url.Values { "fi_id" : {fileID}, "fi_name": {filepath.Base(to)},} + formData := url.Values{"fi_id": {fileID}, "fi_name": {filepath.Base(to)}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData) if err != nil { @@ -436,7 +436,7 @@ func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to stri } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(ioutil.Discard, readCloser) var output FileFabricResponse @@ -449,7 +449,7 @@ func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to stri if err != nil { return err } - + return nil } @@ -473,7 +473,7 @@ func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir str parentID, err = storage.createDirectory(threadIndex, parent) if err != nil { if err == errFileFabricDirectoryExists { - var isDir bool + var isDir bool parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent) if err != nil { return "", err @@ -503,7 +503,7 @@ func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) ( return "", err } - formData := url.Values { "fi_name": {filepath.Base(dir)}, "fi_pid" : {parentID}} + formData := url.Values{"fi_name": {filepath.Base(dir)}, "fi_pid": {parentID}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData) if err != nil { @@ -511,7 +511,7 @@ func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) ( } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(ioutil.Discard, 
readCloser) var output struct { FileFabricResponse @@ -545,7 +545,7 @@ func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) ( // DownloadFile reads the file at 'filePath' into the chunk. func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) { - formData := url.Values { "fi_id" : {storage.storageDir + filePath}} + formData := url.Values{"fi_id": {storage.storageDir + filePath}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData) if err != nil { @@ -553,7 +553,7 @@ func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(ioutil.Discard, readCloser) _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads) return err } @@ -567,15 +567,15 @@ func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, c } fileName := filepath.Base(filePath) - requestBody := &bytes.Buffer{} - writer := multipart.NewWriter(requestBody) - part, _ := writer.CreateFormFile("file_1", fileName) - part.Write(content) + requestBody := &bytes.Buffer{} + writer := multipart.NewWriter(requestBody) + part, _ := writer.CreateFormFile("file_1", fileName) + part.Write(content) - writer.WriteField("file_name1", fileName) + writer.WriteField("file_name1", fileName) writer.WriteField("fi_pid", parentID) writer.WriteField("fi_structtype", "g") - writer.Close() + writer.Close() headers := make(map[string]string) headers["Content-Type"] = writer.FormDataContentType() @@ -584,7 +584,7 @@ func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, c readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader) defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(ioutil.Discard, readCloser) var output FileFabricResponse diff --git a/src/duplicacy_filestorage.go b/src/duplicacy_filestorage.go index fe9387bb..0cd58c79 100644 --- a/src/duplicacy_filestorage.go +++ b/src/duplicacy_filestorage.go @@ -79,7 +79,7 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri for _, f := range list { name := f.Name() - if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' { + if (f.IsDir() || f.Mode()&os.ModeSymlink != 0) && name[len(name)-1] != '/' { name += "/" } files = append(files, name) @@ -165,7 +165,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content return err } } else { - if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 { + if !stat.IsDir() && stat.Mode()&os.ModeSymlink == 0 { return fmt.Errorf("The path %s is not a directory or symlink", dir) } } diff --git a/src/duplicacy_gcdstorage.go b/src/duplicacy_gcdstorage.go index af86a6e5..73521e39 100644 --- a/src/duplicacy_gcdstorage.go +++ b/src/duplicacy_gcdstorage.go @@ -18,7 +18,7 @@ import ( "sync" "time" - "golang.org/x/net/context" + "context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/drive/v3" @@ -38,8 +38,8 @@ type GCDStorage struct { service *drive.Service idCache map[string]string // only directories are saved in this cache idCacheLock sync.Mutex - backoffs []int // desired backoff time in seconds for each thread - attempts []int // number of failed attempts since last success for each thread + backoffs []int // desired backoff time in 
seconds for each thread + attempts []int // number of failed attempts since last success for each thread driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive spaces string // 'appDataFolder' if scope is drive.appdata; 'drive' otherwise @@ -382,7 +382,7 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre } if subject, ok := object["subject"]; ok { - config.Subject = subject.(string) + config.Subject = subject.(string) } tokenSource = config.TokenSource(ctx) @@ -442,7 +442,6 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre storage.attempts[i] = 0 } - if scope == drive.DriveAppdataScope { storage.spaces = "appDataFolder" storage.savePathID("", "appDataFolder") @@ -536,7 +535,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i } return files, nil, nil } else { - lock := sync.Mutex {} + lock := sync.Mutex{} allFiles := []string{} allSizes := []int64{} @@ -564,8 +563,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries)) - files := []string {} - sizes := []int64 {} + files := []string{} + sizes := []int64{} for _, entry := range entries { if entry.MimeType != GCDDirectoryMimeType { name := entry.Name @@ -579,7 +578,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i files = append(files, name) sizes = append(sizes, entry.Size) } else { - directoryChannel <- parent+"/"+entry.Name + directoryChannel <- parent + "/" + entry.Name storage.savePathID(parent+"/"+entry.Name, entry.Id) } } @@ -588,14 +587,14 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i allSizes = append(allSizes, sizes...) 
lock.Unlock() directoryChannel <- "" - } (parent) + }(parent) } if activeWorkers > 0 { select { - case err := <- errorChannel: + case err := <-errorChannel: return nil, nil, err - case directory := <- directoryChannel: + case directory := <-directoryChannel: if directory == "" { activeWorkers-- } else { diff --git a/src/duplicacy_gcsstorage.go b/src/duplicacy_gcsstorage.go index fc280926..51a62ca0 100644 --- a/src/duplicacy_gcsstorage.go +++ b/src/duplicacy_gcsstorage.go @@ -15,7 +15,7 @@ import ( "time" gcs "cloud.google.com/go/storage" - "golang.org/x/net/context" + "context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" diff --git a/src/duplicacy_keyring_windows.go b/src/duplicacy_keyring_windows.go index cba4c50e..86ed7f71 100644 --- a/src/duplicacy_keyring_windows.go +++ b/src/duplicacy_keyring_windows.go @@ -45,13 +45,14 @@ func keyringEncrypt(value []byte) ([]byte, error) { return nil, err } - address := uintptr(unsafe.Pointer(dataOut.pbData)) - defer procLocalFree.Call(address) - encryptedData := make([]byte, dataOut.cbData) + size := unsafe.Sizeof(byte(0)) for i := 0; i < len(encryptedData); i++ { - encryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i))) + encryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(dataOut.pbData)) + uintptr(i)*size)) } + address := uintptr(unsafe.Pointer(dataOut.pbData)) + defer procLocalFree.Call(address) + return encryptedData, nil } @@ -69,14 +70,14 @@ func keyringDecrypt(value []byte) ([]byte, error) { return nil, err } - address := uintptr(unsafe.Pointer(dataOut.pbData)) - defer procLocalFree.Call(address) - decryptedData := make([]byte, dataOut.cbData) + size := unsafe.Sizeof(byte(0)) for i := 0; i < len(decryptedData); i++ { - address := int(uintptr(unsafe.Pointer(dataOut.pbData))) - decryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i))) + decryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(dataOut.pbData)) + uintptr(i)*size)) } + address := uintptr(unsafe.Pointer(dataOut.pbData)) + + defer procLocalFree.Call(address) return decryptedData, nil } diff --git a/src/duplicacy_log.go b/src/duplicacy_log.go index 7daca420..613b4d8f 100644 --- a/src/duplicacy_log.go +++ b/src/duplicacy_log.go @@ -6,13 +6,13 @@ package duplicacy import ( "fmt" - "os" "log" + "os" + "regexp" "runtime/debug" "sync" "testing" "time" - "regexp" ) const ( @@ -115,7 +115,6 @@ func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) { } } - func LOG_FATAL(logID string, format string, v ...interface{}) { logf(FATAL, logID, format, v...) 
} @@ -193,8 +192,8 @@ type Logger struct { func (logger *Logger) Write(line []byte) (n int, err error) { n = len(line) - for len(line) > 0 && line[len(line) - 1] == '\n' { - line = line[:len(line) - 1] + for len(line) > 0 && line[len(line)-1] == '\n' { + line = line[:len(line)-1] } matched := logger.formatRegex.FindStringSubmatch(string(line)) if matched != nil { @@ -203,12 +202,12 @@ func (logger *Logger) Write(line []byte) (n int, err error) { LOG_INFO("LOG_DEFAULT", "%s", line) } - return + return } func init() { log.SetFlags(0) - log.SetOutput(&Logger{ formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`) }) + log.SetOutput(&Logger{formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`)}) } const ( diff --git a/src/duplicacy_oneclient.go b/src/duplicacy_oneclient.go index ec0358c6..d97fad91 100644 --- a/src/duplicacy_oneclient.go +++ b/src/duplicacy_oneclient.go @@ -12,11 +12,11 @@ import ( "io/ioutil" "math/rand" "net/http" - "strings" + "path/filepath" "strconv" + "strings" "sync" "time" - "path/filepath" "golang.org/x/oauth2" ) @@ -44,9 +44,9 @@ type OneDriveClient struct { IsConnected bool TestMode bool - IsBusiness bool + IsBusiness bool RefreshTokenURL string - APIURL string + APIURL string } func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) { @@ -118,7 +118,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{}, if reader, ok := inputReader.(*RateLimitedReader); ok { request.ContentLength = reader.Length() - request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length() - 1, reader.Length())) + request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length()-1, reader.Length())) } if url != client.RefreshTokenURL { @@ -182,10 +182,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{}, } else if response.StatusCode == 409 { return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"} } else if response.StatusCode > 401 && response.StatusCode != 404 { - delay := int((rand.Float32() * 0.5 + 0.5) * 1000.0 * float32(backoff)) + delay := int((rand.Float32()*0.5 + 0.5) * 1000.0 * float32(backoff)) if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 { retryAfter, _ := strconv.Atoi(backoffList[0]) - if retryAfter * 1000 > delay { + if retryAfter*1000 > delay { delay = retryAfter * 1000 } } @@ -330,7 +330,7 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit // Upload file using the simple method; this is only possible for OneDrive Personal or if the file // is smaller than 4MB for OneDrive Business - if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) { + if !client.IsBusiness || (client.TestMode && rand.Int()%2 == 0) { url := client.APIURL + "/drive/root:/" + path + ":/content" readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream") @@ -355,17 +355,17 @@ func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string type CreateUploadSessionItem struct { ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"` - Name string `json:"name"` + Name string `json:"name"` } - input := map[string]interface{} { - "item": CreateUploadSessionItem { + input := map[string]interface{}{ + "item": CreateUploadSessionItem{ ConflictBehavior: "replace", - Name: filepath.Base(path), + Name: filepath.Base(path), }, } - readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + 
":/createUploadSession", "POST", input, "application/json") + readCloser, _, err := client.call(client.APIURL+"/drive/root:/"+path+":/createUploadSession", "POST", input, "application/json") if err != nil { return "", err } diff --git a/src/duplicacy_preference.go b/src/duplicacy_preference.go index fe24b220..9cfad2b1 100644 --- a/src/duplicacy_preference.go +++ b/src/duplicacy_preference.go @@ -15,18 +15,18 @@ import ( // Preference stores options for each storage. type Preference struct { - Name string `json:"name"` - SnapshotID string `json:"id"` - RepositoryPath string `json:"repository"` - StorageURL string `json:"storage"` - Encrypted bool `json:"encrypted"` - BackupProhibited bool `json:"no_backup"` - RestoreProhibited bool `json:"no_restore"` - DoNotSavePassword bool `json:"no_save_password"` - NobackupFile string `json:"nobackup_file"` - Keys map[string]string `json:"keys"` - FiltersFile string `json:"filters"` - ExcludeByAttribute bool `json:"exclude_by_attribute"` + Name string `json:"name"` + SnapshotID string `json:"id"` + RepositoryPath string `json:"repository"` + StorageURL string `json:"storage"` + Encrypted bool `json:"encrypted"` + BackupProhibited bool `json:"no_backup"` + RestoreProhibited bool `json:"no_restore"` + DoNotSavePassword bool `json:"no_save_password"` + NobackupFile string `json:"nobackup_file"` + Keys map[string]string `json:"keys"` + FiltersFile string `json:"filters"` + ExcludeByAttribute bool `json:"exclude_by_attribute"` } var preferencePath string diff --git a/src/duplicacy_sftpstorage.go b/src/duplicacy_sftpstorage.go index df379c1c..1c2cff1e 100644 --- a/src/duplicacy_sftpstorage.go +++ b/src/duplicacy_sftpstorage.go @@ -13,8 +13,8 @@ import ( "path" "runtime" "strings" - "time" "sync" + "time" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" @@ -66,7 +66,7 @@ func CreateSFTPStorage(compatibilityMode bool, server string, port int, username "aes128-cbc", "3des-cbc", } - sftpConfig.KeyExchanges = [] string { + sftpConfig.KeyExchanges = []string{ "curve25519-sha256@libssh.org", "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521", "diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1", @@ -132,9 +132,9 @@ func (storage *SFTPStorage) getSFTPClient() *sftp.Client { return storage.client } -func (storage *SFTPStorage) retry(f func () error) error { +func (storage *SFTPStorage) retry(f func() error) error { delay := time.Second - for i := 0;; i++ { + for i := 0; ; i++ { err := f() if err != nil && strings.Contains(err.Error(), "EOF") && i < storage.numberOfTries { LOG_WARN("SFTP_RETRY", "Encountered an error (%v); retry after %d second(s)", err, delay/time.Second) @@ -163,6 +163,7 @@ func (storage *SFTPStorage) retry(f func () error) error { return err } } + // ListFiles return the list of files and subdirectories under 'file' (non-recursively) func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) { @@ -220,8 +221,10 @@ func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (e if fileInfo != nil { return fmt.Errorf("The destination file %s already exists", toPath) } - err = storage.retry(func() error { return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from), - path.Join(storage.storageDir, to)) }) + err = storage.retry(func() error { + return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from), + path.Join(storage.storageDir, to)) + }) return err } diff --git a/src/duplicacy_shadowcopy_darwin.go 
diff --git a/src/duplicacy_shadowcopy_darwin.go b/src/duplicacy_shadowcopy_darwin.go
index 1d8a3b64..d8e56471 100755
--- a/src/duplicacy_shadowcopy_darwin.go
+++ b/src/duplicacy_shadowcopy_darwin.go
@@ -163,7 +163,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 		return top
 	}
 	snapshotName := "com.apple.TimeMachine." + snapshotDate
-	
+
 	snapshotNameRegex := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
 	matched = snapshotNameRegex.FindStringSubmatch(tmutilOutput)
 	if len(matched) > 0 {
@@ -171,7 +171,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 	} else {
 		LOG_INFO("VSS_CREATE", "Can't find the snapshot name with 'tmutil listlocalsnapshots'; fallback to %s", snapshotName)
 	}
-	
+
 	// Mount snapshot as readonly and hide from GUI i.e. Finder
 	_, err = CommandWithTimeout(timeoutInSeconds, "/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName,
 		"/System/Volumes/Data", snapshotPath)
diff --git a/src/duplicacy_shadowcopy_windows.go b/src/duplicacy_shadowcopy_windows.go
index 58d003f4..2b211a24 100644
--- a/src/duplicacy_shadowcopy_windows.go
+++ b/src/duplicacy_shadowcopy_windows.go
@@ -15,7 +15,11 @@ import (
 )

 //507C37B4-CF5B-4e95-B0AF-14EB9767467E
-var IID_IVSS_ASYNC = &ole.GUID{0x507C37B4, 0xCF5B, 0x4e95, [8]byte{0xb0, 0xaf, 0x14, 0xeb, 0x97, 0x67, 0x46, 0x7e}}
+var IID_IVSS_ASYNC = &ole.GUID{
+	Data1: 0x507C37B4,
+	Data2: 0xCF5B,
+	Data3: 0x4e95,
+	Data4: [8]byte{0xb0, 0xaf, 0x14, 0xeb, 0x97, 0x67, 0x46, 0x7e}}

 type IVSSAsync struct {
 	ole.IUnknown
@@ -78,7 +82,11 @@ func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
 }

 //665c1d5f-c218-414d-a05d-7fef5f9d5c86
-var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}
+var IID_IVSS = &ole.GUID{
+	Data1: 0x665c1d5f,
+	Data2: 0xc218,
+	Data3: 0x414d,
+	Data4: [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}

 type IVSS struct {
 	ole.IUnknown
@@ -240,13 +248,13 @@ type SnapshotProperties struct {
 func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
 	var ret uintptr
 	if runtime.GOARCH == "386" {
-		address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
+		size := unsafe.Sizeof(uint32(4))
 		ret, _, _ = syscall.Syscall6(vss.VTable().getSnapshotProperties, 6,
 			uintptr(unsafe.Pointer(vss)),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
+			uintptr(*(*uint32)(unsafe.Pointer(&snapshotSetID))),
+			uintptr(*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&snapshotSetID)) + size))),
+			uintptr(*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&snapshotSetID)) + 2*size))),
+			uintptr(*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&snapshotSetID)) + 3*size))),
 			uintptr(unsafe.Pointer(properties)))
 	} else {
 		ret, _, _ = syscall.Syscall(vss.VTable().getSnapshotProperties, 3,
@@ -267,13 +275,13 @@ func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {
 	var ret uintptr

 	if runtime.GOARCH == "386" {
-		address := uint(uintptr(unsafe.Pointer(&snapshotID)))
+		size := unsafe.Sizeof(uint32(4))
 		ret, _, _ = syscall.Syscall9(vss.VTable().deleteSnapshots, 9,
 			uintptr(unsafe.Pointer(vss)),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
-			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
+			uintptr(*(*uint32)(unsafe.Pointer(&snapshotID))),
+			uintptr(*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&snapshotID)) + size))),
+			uintptr(*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&snapshotID)) + 2*size))),
+			uintptr(*(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&snapshotID)) + 3*size))),
 			uintptr(VSS_OBJECT_SNAPSHOT),
 			uintptr(1),
 			uintptr(unsafe.Pointer(&deleted)),
@@ -296,15 +304,16 @@ func uint16ArrayToString(p *uint16) string {
 		return ""
 	}

 	s := make([]uint16, 0)
-	address := uintptr(unsafe.Pointer(p))
+	size := unsafe.Sizeof(uint16(0))
+	i := uintptr(0)
 	for {
-		c := *(*uint16)(unsafe.Pointer(address))
+		c := *(*uint16)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + i*size))
 		if c == 0 {
 			break
 		}
 		s = append(s, c)
-		address = uintptr(int(address) + 2)
+		i++
 	}

 	return syscall.UTF16ToString(s)
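On 386, these COM methods receive a 16-byte GUID argument by value as four 32-bit stack words, which is why the hunks above read the struct in uint32-sized steps; the rewrite derives each offset from unsafe.Sizeof instead of hard-coding 4, 8, and 12. A sketch of that decomposition with a hypothetical helper:

package duplicacy

import "unsafe"

// guidWords reinterprets a 16-byte GUID as the four 32-bit words that a
// 386 syscall expects, computing each offset from the word size rather
// than from hard-coded byte offsets.
func guidWords(g *[16]byte) [4]uint32 {
	size := unsafe.Sizeof(uint32(0))
	var words [4]uint32
	for i := range words {
		words[i] = *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(g)) + uintptr(i)*size))
	}
	return words
}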
diff --git a/src/duplicacy_snapshot.go b/src/duplicacy_snapshot.go
index 07b46915..5ab7d503 100644
--- a/src/duplicacy_snapshot.go
+++ b/src/duplicacy_snapshot.go
@@ -175,7 +175,7 @@ func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patt
 		if patternIncludeFile == "" {
 			continue
 		}
-		if ! filepath.IsAbs(patternIncludeFile) {
+		if !filepath.IsAbs(patternIncludeFile) {
 			basePath := ""
 			if len(includedFiles) == 0 {
 				basePath, _ = os.Getwd()
diff --git a/src/duplicacy_snapshotmanager.go b/src/duplicacy_snapshotmanager.go
index d5a47be5..893a87c1 100644
--- a/src/duplicacy_snapshotmanager.go
+++ b/src/duplicacy_snapshotmanager.go
@@ -933,7 +933,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 				_, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
 				if err != nil {
 					LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
-						     chunkID, err)
+						chunkID, err)
 				} else if exist {
 					LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID)
 					continue
@@ -1053,7 +1053,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 			if err != nil {
 				LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err)
 			} else {
-				LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks)
+				LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks)-numberOfVerifiedChunks)
 			}
 		}
 	}
@@ -1105,10 +1105,10 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

 		elapsedTime := time.Now().Sub(startTime).Seconds()
 		speed := int64(float64(downloadedChunkSize) / elapsedTime)
-		remainingTime := int64(float64(totalChunks - i - 1) / float64(i + 1) * elapsedTime)
-		percentage := float64(i + 1) / float64(totalChunks) * 100.0
+		remainingTime := int64(float64(totalChunks-i-1) / float64(i+1) * elapsedTime)
+		percentage := float64(i+1) / float64(totalChunks) * 100.0
 		LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
-			chunkID, i + 1, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
+			chunkID, i+1, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
 	}

 	if manager.chunkDownloader.NumberOfFailedChunks > 0 {
@@ -1457,8 +1457,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path

 	file := manager.FindFile(snapshot, path, false)
 	if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
-			fmt.Printf("%s", chunk)
-		}) {
+		fmt.Printf("%s", chunk)
+	}) {
 		LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
 			path, snapshot.ID, snapshot.Revision)
 		return false
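The VERIFY_PROGRESS hunk only tightens spacing, but the estimate it prints is worth spelling out: after verifying i+1 of N chunks in t seconds, the predicted time left is t scaled by the ratio of chunks remaining to chunks done, i.e. t*(N-i-1)/(i+1). A sketch under that assumption, with illustrative names:

package duplicacy

// verifyProgress estimates the time remaining and the completion
// percentage after `done` of `total` chunks have been verified in
// `elapsed` seconds, by scaling elapsed time with the remaining/done ratio.
func verifyProgress(done int, total int, elapsed float64) (remainingSeconds int64, percentage float64) {
	remainingSeconds = int64(float64(total-done) / float64(done) * elapsed)
	percentage = float64(done) / float64(total) * 100.0
	return remainingSeconds, percentage
}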
%d", path, snapshot.ID, snapshot.Revision) return false diff --git a/src/duplicacy_storage.go b/src/duplicacy_storage.go index e098026c..f7e2e51b 100644 --- a/src/duplicacy_storage.go +++ b/src/duplicacy_storage.go @@ -627,7 +627,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor // Handle writing directly to the root of the drive // For gcd://driveid@/, driveid@ is match[3] not match[2] if matched[2] == "" && strings.HasSuffix(matched[3], "@") { - matched[2], matched[3] = matched[3], matched[2] + matched[2], matched[3] = matched[3], matched[2] } driveID := matched[2] if driveID != "" { @@ -646,13 +646,13 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor } else if matched[1] == "one" || matched[1] == "odb" { storagePath := matched[3] + matched[4] prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):") - tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword) + tokenFile := GetPassword(preference, matched[1]+"_token", prompt, true, resetPassword) oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads) if err != nil { LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err) return nil } - SavePassword(preference, matched[1] + "_token", tokenFile) + SavePassword(preference, matched[1]+"_token", tokenFile) return oneDriveStorage } else if matched[1] == "hubic" { storagePath := matched[3] + matched[4] diff --git a/src/duplicacy_utils.go b/src/duplicacy_utils.go index 7f6754b5..ba6c8155 100644 --- a/src/duplicacy_utils.go +++ b/src/duplicacy_utils.go @@ -11,10 +11,10 @@ import ( "io" "os" "regexp" + "runtime" "strconv" "strings" "time" - "runtime" "github.com/gilbertchen/gopass" "golang.org/x/crypto/pbkdf2" @@ -56,7 +56,7 @@ func IsEmptyFilter(pattern string) bool { } func IsUnspecifiedFilter(pattern string) bool { - if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") { + if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") { return true } else { return false @@ -142,7 +142,9 @@ func RateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int6 if rate <= 0 { return io.Copy(writer, reader) } - for range time.Tick(time.Second / 5) { + ticker := time.NewTicker(time.Second / 5) + defer ticker.Stop() + for range ticker.C { n, err := io.CopyN(writer, reader, int64(rate*1024/5)) written += n if err != nil { @@ -469,7 +471,7 @@ func PrintMemoryUsage() { runtime.ReadMemStats(&m) LOG_INFO("MEMORY_STATS", "Currently allocated: %s, total allocated: %s, system memory: %s, number of GCs: %d", - PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC) + PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC) time.Sleep(time.Second) } diff --git a/src/duplicacy_utils_freebsd.go b/src/duplicacy_utils_freebsd.go index a1292840..2c5f5c92 100644 --- a/src/duplicacy_utils_freebsd.go +++ b/src/duplicacy_utils_freebsd.go @@ -4,10 +4,7 @@ package duplicacy -import ( -) - func excludedByAttribute(attirbutes map[string][]byte) bool { _, ok := attirbutes["duplicacy_exclude"] return ok -} \ No newline at end of file +} diff --git a/src/duplicacy_utils_linux.go b/src/duplicacy_utils_linux.go index 223f32c5..2c5f5c92 100644 --- a/src/duplicacy_utils_linux.go +++ 
diff --git a/src/duplicacy_utils_freebsd.go b/src/duplicacy_utils_freebsd.go
index a1292840..2c5f5c92 100644
--- a/src/duplicacy_utils_freebsd.go
+++ b/src/duplicacy_utils_freebsd.go
@@ -4,10 +4,7 @@

 package duplicacy

-import (
-)
-
 func excludedByAttribute(attirbutes map[string][]byte) bool {
 	_, ok := attirbutes["duplicacy_exclude"]
 	return ok
-}
\ No newline at end of file
+}
diff --git a/src/duplicacy_utils_linux.go b/src/duplicacy_utils_linux.go
index 223f32c5..2c5f5c92 100644
--- a/src/duplicacy_utils_linux.go
+++ b/src/duplicacy_utils_linux.go
@@ -4,9 +4,6 @@

 package duplicacy

-import (
-)
-
 func excludedByAttribute(attirbutes map[string][]byte) bool {
 	_, ok := attirbutes["duplicacy_exclude"]
 	return ok
diff --git a/src/duplicacy_utils_others.go b/src/duplicacy_utils_others.go
index 1ed1462a..44be2a70 100644
--- a/src/duplicacy_utils_others.go
+++ b/src/duplicacy_utils_others.go
@@ -67,7 +67,6 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {

 	for _, name := range names {
-
 		newAttribute, found := entry.Attributes[name]
 		if found {
 			oldAttribute, _ := xattr.Get(fullPath, name)
diff --git a/src/duplicacy_utils_test.go b/src/duplicacy_utils_test.go
index f53fa584..b1354636 100644
--- a/src/duplicacy_utils_test.go
+++ b/src/duplicacy_utils_test.go
@@ -92,13 +92,13 @@ func TestMatchPattern(t *testing.T) {
 		}
 	}

-	for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
+	for _, pattern := range []string{"+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
 		if IsUnspecifiedFilter(pattern) {
 			t.Errorf("pattern %s has a specified filter", pattern)
 		}
 	}

-	for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} {
+	for _, pattern := range []string{"i", "e", "ia", "ib", "a", "b"} {
 		if !IsUnspecifiedFilter(pattern) {
 			t.Errorf("pattern %s does not have a specified filter", pattern)
 		}
diff --git a/src/duplicacy_webdavstorage.go b/src/duplicacy_webdavstorage.go
index 7777b17c..13514976 100644
--- a/src/duplicacy_webdavstorage.go
+++ b/src/duplicacy_webdavstorage.go
@@ -17,11 +17,11 @@ import (
 	"math/rand"
 	"net/http"
 	//"net/http/httputil"
+	"io/ioutil"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
-	"io/ioutil"
 )

 type WebDAVStorage struct {
@@ -323,7 +323,7 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st

 		// Add the directory to the directory cache
 		storage.directoryCacheLock.Lock()
-		storage.directoryCache[dir + file] = 1
+		storage.directoryCache[dir+file] = 1
 		storage.directoryCacheLock.Unlock()
 	}

@@ -350,8 +350,8 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
 	m, exist := properties["/"+storage.storageDir+filePath]

 	// If no properties exist for the given filePath, remove the trailing / from filePath and search again
-	if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' {
-		m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]]
+	if !exist && filePath != "" && filePath[len(filePath)-1] == '/' {
+		m, exist = properties["/"+storage.storageDir+filePath[:len(filePath)-1]]
 	}

 	if !exist {
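The WebDAV directory cache touched above is shared across storage threads, so it is only written while its mutex is held. A minimal sketch of that guarded-map pattern; the dirCache type is illustrative and not the actual WebDAVStorage layout:

package duplicacy

import "sync"

// dirCache is a mutex-guarded set of directories known to exist, mirroring
// how WebDAVStorage remembers listed directories across threads.
type dirCache struct {
	lock    sync.Mutex
	entries map[string]int
}

func (c *dirCache) add(path string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.entries == nil {
		c.entries = make(map[string]int)
	}
	c.entries[path] = 1
}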