From a953c4ec28dbe0169b3271f3f6fe6937517ab0d3 Mon Sep 17 00:00:00 2001 From: Gilbert Chen Date: Thu, 7 Apr 2022 22:31:18 -0400 Subject: [PATCH] Don't parse test parameters in init() This is to make test parameter parsing work with newer versions of Go --- src/duplicacy_backupmanager_test.go | 4 +- src/duplicacy_chunk_test.go | 6 +- src/duplicacy_chunkuploader_test.go | 10 +-- src/duplicacy_snapshotmanager_test.go | 2 +- src/duplicacy_storage_test.go | 93 ++++++++++++--------------- 5 files changed, 52 insertions(+), 63 deletions(-) diff --git a/src/duplicacy_backupmanager_test.go b/src/duplicacy_backupmanager_test.go index 988bf43f..4aede099 100644 --- a/src/duplicacy_backupmanager_test.go +++ b/src/duplicacy_backupmanager_test.go @@ -235,12 +235,12 @@ func TestBackupManager(t *testing.T) { dataShards := 0 parityShards := 0 - if testErasureCoding { + if *testErasureCoding { dataShards = 5 parityShards = 2 } - if testFixedChunkSize { + if *testFixedChunkSize { if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) { t.Errorf("Failed to initialize the storage") } diff --git a/src/duplicacy_chunk_test.go b/src/duplicacy_chunk_test.go index dea0605f..cb7fc419 100644 --- a/src/duplicacy_chunk_test.go +++ b/src/duplicacy_chunk_test.go @@ -62,7 +62,7 @@ func TestChunkBasic(t *testing.T) { config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL maxSize := 1000000 - if testRSAEncryption { + if *testRSAEncryption { privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048) if err != nil { t.Errorf("Failed to generate a random private key: %v", err) @@ -71,7 +71,7 @@ func TestChunkBasic(t *testing.T) { config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey) } - if testErasureCoding { + if *testErasureCoding { config.DataShards = 5 config.ParityShards = 2 } @@ -98,7 +98,7 @@ func TestChunkBasic(t *testing.T) { encryptedData := make([]byte, chunk.GetLength()) copy(encryptedData, chunk.GetBytes()) - if testErasureCoding { + if *testErasureCoding { offset := 24 + 32 * 7 start := rand.Int() % (len(encryptedData) - offset) + offset length := (len(encryptedData) - offset) / 7 diff --git a/src/duplicacy_chunkuploader_test.go b/src/duplicacy_chunkuploader_test.go index c31c1c18..64422264 100644 --- a/src/duplicacy_chunkuploader_test.go +++ b/src/duplicacy_chunkuploader_test.go @@ -38,7 +38,7 @@ func TestUploaderAndDownloader(t *testing.T) { os.RemoveAll(testDir) os.MkdirAll(testDir, 0700) - t.Logf("storage: %s", testStorageName) + t.Logf("storage: %s", *testStorageName) storage, err := loadStorage(testDir, 1) if err != nil { @@ -46,7 +46,7 @@ func TestUploaderAndDownloader(t *testing.T) { return } storage.EnableTestMode() - storage.SetRateLimits(testRateLimit, testRateLimit) + storage.SetRateLimits(*testRateLimit, *testRateLimit) for _, dir := range []string{"chunks", "snapshots"} { err = storage.CreateDirectory(0, dir) @@ -59,7 +59,7 @@ func TestUploaderAndDownloader(t *testing.T) { numberOfChunks := 100 maxChunkSize := 64 * 1024 - if testQuickMode { + if *testQuickMode { numberOfChunks = 10 } @@ -91,7 +91,7 @@ func TestUploaderAndDownloader(t *testing.T) { t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks)) } - chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil) + chunkUploader := CreateChunkUploader(config, storage, nil, *testThreads, nil) chunkUploader.completionFunc = completionFunc chunkUploader.Start() @@ -101,7 +101,7 @@ func TestUploaderAndDownloader(t *testing.T) { 
chunkUploader.Stop() - chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads, false) + chunkDownloader := CreateChunkDownloader(config, storage, nil, true, *testThreads, false) chunkDownloader.totalChunkSize = int64(totalFileSize) for _, chunk := range chunks { diff --git a/src/duplicacy_snapshotmanager_test.go b/src/duplicacy_snapshotmanager_test.go index 0bb9c2d5..a839edaf 100644 --- a/src/duplicacy_snapshotmanager_test.go +++ b/src/duplicacy_snapshotmanager_test.go @@ -120,7 +120,7 @@ func uploadTestChunk(manager *SnapshotManager, content []byte) string { LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize) } - chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, testThreads, nil) + chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, *testThreads, nil) chunkUploader.completionFunc = completionFunc chunkUploader.Start() diff --git a/src/duplicacy_storage_test.go b/src/duplicacy_storage_test.go index 4e40f525..898797c0 100644 --- a/src/duplicacy_storage_test.go +++ b/src/duplicacy_storage_test.go @@ -22,28 +22,17 @@ import ( "math/rand" ) -var testStorageName string -var testRateLimit int -var testQuickMode bool -var testThreads int -var testFixedChunkSize bool -var testRSAEncryption bool -var testErasureCoding bool - -func init() { - flag.StringVar(&testStorageName, "storage", "", "the test storage to use") - flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec") - flag.BoolVar(&testQuickMode, "quick", false, "quick test") - flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads") - flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size") - flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption") - flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding") - flag.Parse() -} +var testStorageName = flag.String("storage", "", "the test storage to use") +var testRateLimit = flag.Int("limit-rate", 0, "maximum transfer speed in kbytes/sec") +var testQuickMode = flag.Bool("quick", false, "quick test") +var testThreads = flag.Int("threads", 1, "number of downloading/uploading threads") +var testFixedChunkSize = flag.Bool("fixed-chunk-size", false, "fixed chunk size") +var testRSAEncryption = flag.Bool("rsa", false, "enable RSA encryption") +var testErasureCoding = flag.Bool("erasure-coding", false, "enable Erasure Coding") func loadStorage(localStoragePath string, threads int) (Storage, error) { - if testStorageName == "" || testStorageName == "file" { + if *testStorageName == "" || *testStorageName == "file" { storage, err := CreateFileStorage(localStoragePath, false, threads) if storage != nil { // Use a read level of at least 2 because this will catch more errors than a read level of 1. 
@@ -64,116 +53,116 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) { return nil, err } - config, found := configs[testStorageName] + config, found := configs[*testStorageName] if !found { - return nil, fmt.Errorf("No storage named '%s' found", testStorageName) + return nil, fmt.Errorf("No storage named '%s' found", *testStorageName) } - if testStorageName == "flat" { + if *testStorageName == "flat" { storage, err := CreateFileStorage(localStoragePath, false, threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "samba" { + } else if *testStorageName == "samba" { storage, err := CreateFileStorage(localStoragePath, true, threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "sftp" { + } else if *testStorageName == "sftp" { port, _ := strconv.Atoi(config["port"]) storage, err := CreateSFTPStorageWithPassword(config["server"], port, config["username"], config["directory"], 2, config["password"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "s3" { + } else if *testStorageName == "s3" { storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "wasabi" { + } else if *testStorageName == "wasabi" { storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "s3c" { + } else if *testStorageName == "s3c" { storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "digitalocean" { + } else if *testStorageName == "digitalocean" { storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "minio" { + } else if *testStorageName == "minio" { storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, false, true) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "minios" { + } else if *testStorageName == "minios" { storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, true) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "dropbox" { + } else if *testStorageName == "dropbox" { storage, err := CreateDropboxStorage(config["token"], config["directory"], 1, threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "b2" { + } else if *testStorageName == "b2" { storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - 
} else if testStorageName == "gcs-s3" { + } else if *testStorageName == "gcs-s3" { storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "gcs" { + } else if *testStorageName == "gcs" { storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "gcs-sa" { + } else if *testStorageName == "gcs-sa" { storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "azure" { + } else if *testStorageName == "azure" { storage, err := CreateAzureStorage(config["account"], config["key"], config["container"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "acd" { + } else if *testStorageName == "acd" { storage, err := CreateACDStorage(config["token_file"], config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "gcd" { + } else if *testStorageName == "gcd" { storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "gcd-shared" { + } else if *testStorageName == "gcd-shared" { storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "gcd-impersonate" { + } else if *testStorageName == "gcd-impersonate" { storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "one" { + } else if *testStorageName == "one" { storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "odb" { + } else if *testStorageName == "odb" { storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "one" { + } else if *testStorageName == "one" { storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "hubic" { + } else if *testStorageName == "hubic" { storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "memset" { + } else if *testStorageName == "memset" { storage, err := CreateSwiftStorage(config["storage_url"], config["key"], threads) storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "pcloud" || testStorageName == "box" { + } else if *testStorageName == "pcloud" || *testStorageName == "box" { storage, err := CreateWebDAVStorage(config["host"], 0, config["username"], config["password"], 
config["storage_path"], false, threads) if err != nil { return nil, err } storage.SetDefaultNestingLevels([]int{2, 3}, 2) return storage, err - } else if testStorageName == "fabric" { + } else if *testStorageName == "fabric" { storage, err := CreateFileFabricStorage(config["endpoint"], config["token"], config["storage_path"], threads) if err != nil { return nil, err @@ -182,7 +171,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) { return storage, err } - return nil, fmt.Errorf("Invalid storage named: %s", testStorageName) + return nil, fmt.Errorf("Invalid storage named: %s", *testStorageName) } func cleanStorage(storage Storage) { @@ -322,7 +311,7 @@ func TestStorage(t *testing.T) { os.RemoveAll(testDir) os.MkdirAll(testDir, 0700) - LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName) + LOG_INFO("STORAGE_TEST", "storage: %s", *testStorageName) threads := 8 storage, err := loadStorage(testDir, threads) @@ -331,7 +320,7 @@ func TestStorage(t *testing.T) { return } storage.EnableTestMode() - storage.SetRateLimits(testRateLimit, testRateLimit) + storage.SetRateLimits(*testRateLimit, *testRateLimit) delay := 0 if _, ok := storage.(*ACDStorage); ok { @@ -453,7 +442,7 @@ func TestStorage(t *testing.T) { numberOfFiles := 10 maxFileSize := 64 * 1024 - if testQuickMode { + if *testQuickMode { numberOfFiles = 2 } @@ -588,7 +577,7 @@ func TestCleanStorage(t *testing.T) { os.RemoveAll(testDir) os.MkdirAll(testDir, 0700) - LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName) + LOG_INFO("STORAGE_TEST", "storage: %s", *testStorageName) storage, err := loadStorage(testDir, 1) if err != nil {