Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Clean up formatting and warnings #627

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions duplicacy/duplicacy_main.go
Original file line number Diff line number Diff line change
Expand Up @@ -1019,7 +1019,6 @@ func printFile(context *cli.Context) {
snapshotID = context.String("id")
}


backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
duplicacy.SavePassword(*preference, "password", password)

Expand Down Expand Up @@ -1266,7 +1265,7 @@ func copySnapshots(context *cli.Context) {
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))

destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword, "", "", false)
destinationPassword, "", "", false)
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(destination.Name)

Expand Down Expand Up @@ -1391,7 +1390,7 @@ func benchmark(context *cli.Context) {
if storage == nil {
return
}
duplicacy.Benchmark(repository, storage, int64(fileSize) * 1024 * 1024, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
duplicacy.Benchmark(repository, storage, int64(fileSize)*1024*1024, chunkSize*1024*1024, chunkCount, uploadThreads, downloadThreads)
}

func main() {
Expand Down Expand Up @@ -1569,7 +1568,7 @@ func main() {
cli.BoolFlag{
Name: "persist",
Usage: "continue processing despite chunk errors or existing files (without -overwrite), reporting any affected files",
},
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Expand Down Expand Up @@ -2180,8 +2179,8 @@ func main() {
Usage: "add a comment to identify the process",
},
cli.StringSliceFlag{
Name: "suppress, s",
Usage: "suppress logs with the specified id",
Name: "suppress, s",
Usage: "suppress logs with the specified id",
Argument: "<id>",
},
cli.BoolFlag{
Expand Down
61 changes: 30 additions & 31 deletions src/duplicacy_b2client.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,22 +5,22 @@
package duplicacy

import (
"io"
"os"
"fmt"
"bytes"
"time"
"sync"
"strconv"
"strings"
"net/url"
"net/http"
"math/rand"
"io/ioutil"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
)

type B2Error struct {
Expand All @@ -41,27 +41,27 @@ type B2UploadArgument struct {
var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account"

type B2Client struct {
HTTPClient *http.Client
HTTPClient *http.Client

AccountID string
ApplicationKeyID string
ApplicationKey string
BucketName string
BucketID string
StorageDir string
AccountID string
ApplicationKeyID string
ApplicationKey string
BucketName string
BucketID string
StorageDir string

Lock sync.Mutex
AuthorizationToken string
APIURL string
DownloadURL string
IsAuthorized bool

UploadURLs []string
UploadTokens []string
UploadURLs []string
UploadTokens []string

Threads int
MaximumRetries int
TestMode bool
Threads int
MaximumRetries int
TestMode bool

LastAuthorizationTime int64
}
Expand All @@ -81,7 +81,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, downloadURL str
storageDir = storageDir[1:]
}

if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
if storageDir != "" && storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}

Expand Down Expand Up @@ -128,7 +128,7 @@ func (client *B2Client) retry(retries int, response *http.Response) int {
}
}

if retries >= client.MaximumRetries + 1 {
if retries >= client.MaximumRetries+1 {
return 0
}
retries++
Expand All @@ -143,7 +143,7 @@ func (client *B2Client) retry(retries int, response *http.Response) int {
}

func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
io.ReadCloser, http.Header, int64, error) {
io.ReadCloser, http.Header, int64, error) {

var response *http.Response

Expand Down Expand Up @@ -171,7 +171,6 @@ func (client *B2Client) call(threadIndex int, requestURL string, method string,
inputReader = rateLimitedReader
}


if isUpload {
if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
err := client.getUploadURL(threadIndex)
Expand Down Expand Up @@ -303,7 +302,7 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
defer client.Lock.Unlock()

// Don't authorize if the previous one was done less than 30 seconds ago
if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix() - 30 {
if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix()-30 {
return nil, false
}

Expand Down Expand Up @@ -426,7 +425,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
} else if singleFile {
// handle a single file with no versions as a special case to download the last byte of the file
apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+startFileName)
// requesting byte -1 works for empty files where 0-0 fails with a 416 error
requestHeaders["Range"] = "bytes=-1"
// HEAD request
Expand Down Expand Up @@ -584,7 +583,7 @@ func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID strin

func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {

url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+filePath)

readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
return readCloser, len, err
Expand Down
43 changes: 21 additions & 22 deletions src/duplicacy_backupmanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,9 @@ type BackupManager struct {

nobackupFile string // don't backup directory when this file name is found

filtersFile string // the path to the filters file
filtersFile string // the path to the filters file

excludeByAttribute bool // don't backup file based on file attribute
excludeByAttribute bool // don't backup file based on file attribute

}

Expand Down Expand Up @@ -117,7 +117,6 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
return true
}


// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
// to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
// original unchanged entry list.
Expand Down Expand Up @@ -193,7 +192,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta

if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
manager.config.DataShards, manager.config.ParityShards)
manager.config.DataShards, manager.config.ParityShards)
}

if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
Expand All @@ -217,7 +216,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta

LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute)
manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
Expand Down Expand Up @@ -800,7 +799,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)

localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
manager.filtersFile, manager.excludeByAttribute)
manager.filtersFile, manager.excludeByAttribute)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
return 0
Expand Down Expand Up @@ -833,7 +832,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
var failedFiles int
var skippedFileSize int64
var skippedFiles int64

var downloadedFiles []*Entry

i := 0
Expand Down Expand Up @@ -1202,8 +1201,8 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
// Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
// overwritten directly.
// Return: true, nil: Restored file;
// false, nil: Skipped file;
// Return: true, nil: Restored file;
// false, nil: Skipped file;
// false, error: Failure to restore file (only if allowFailures == true)
func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {
Expand Down Expand Up @@ -1379,7 +1378,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
// fileHash != entry.Hash, warn/error depending on -overwrite option
if !overwrite && !isNewFile {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
return false, fmt.Errorf("file exists")
}

Expand Down Expand Up @@ -1625,7 +1624,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho

if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards",
otherManager.config.DataShards, otherManager.config.ParityShards)
otherManager.config.DataShards, otherManager.config.ParityShards)
}

if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 {
Expand Down Expand Up @@ -1712,7 +1711,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
}

// These two maps store hashes of chunks in the source and destination storages, respectively. Note that
// the value of 'chunks' is used to indicate whether the chunk is a snapshot chunk, while the value of 'otherChunks'
// the value of 'chunks' is used to indicate whether the chunk is a snapshot chunk, while the value of 'otherChunks'
// is not used.
chunks := make(map[string]bool)
otherChunks := make(map[string]bool)
Expand All @@ -1726,15 +1725,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)

for _, chunkHash := range snapshot.FileSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk
chunks[chunkHash] = true // The chunk is a snapshot chunk
}

for _, chunkHash := range snapshot.ChunkSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk
chunks[chunkHash] = true // The chunk is a snapshot chunk
}

for _, chunkHash := range snapshot.LengthSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk
chunks[chunkHash] = true // The chunk is a snapshot chunk
}

description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
Expand All @@ -1747,7 +1746,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho

for _, chunkHash := range snapshot.ChunkHashes {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = false // The chunk is a file chunk
chunks[chunkHash] = false // The chunk is a file chunk
}
}

Expand Down Expand Up @@ -1779,7 +1778,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
}
}

LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))
LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks)-len(chunksToCopy), len(chunks))

chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, downloadingThreads, false)

Expand All @@ -1799,11 +1798,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho

elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime)
remainingTime := int64(float64(len(chunksToCopy) - chunkIndex - 1) / float64(chunkIndex + 1) * elapsedTime)
percentage := float64(chunkIndex + 1) / float64(len(chunksToCopy)) * 100.0
remainingTime := int64(float64(len(chunksToCopy)-chunkIndex-1) / float64(chunkIndex+1) * elapsedTime)
percentage := float64(chunkIndex+1) / float64(len(chunksToCopy)) * 100.0
LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%",
action, chunk.GetID(), chunkIndex + 1, len(chunksToCopy),
PrettySize(speed), PrettyTime(remainingTime), percentage)
action, chunk.GetID(), chunkIndex+1, len(chunksToCopy),
PrettySize(speed), PrettyTime(remainingTime), percentage)
otherManager.config.PutChunk(chunk)
})

Expand All @@ -1827,7 +1826,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
chunkDownloader.Stop()
chunkUploader.Stop()

LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks) - copiedChunks)
LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks)-copiedChunks)

for _, snapshot := range snapshots {
if revisionMap[snapshot.ID][snapshot.Revision] == false {
Expand Down
Loading