From 3070427adf49c9e02e7723fdc4168d51d5563e93 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 31 Jan 2023 00:03:51 -0800 Subject: [PATCH 1/2] removed Gopkg things now that go.mod is better and mandatory, and initialized it and was able to get the binary to build --- Gopkg.lock | 21 - Gopkg.toml | 30 -- go.mod | 10 + go.sum | 6 + .../t3rm1n4l/go-humanize/bytes_test.go | 148 ------ .../t3rm1n4l/go-humanize/comma_test.go | 27 -- .../t3rm1n4l/go-humanize/ordinals_test.go | 20 - .../t3rm1n4l/go-humanize/times_test.go | 126 ----- .../github.com/t3rm1n4l/go-mega/.travis.yml | 9 +- vendor/github.com/t3rm1n4l/go-mega/LICENSE | 21 + vendor/github.com/t3rm1n4l/go-mega/README.md | 2 +- vendor/github.com/t3rm1n4l/go-mega/errors.go | 45 +- vendor/github.com/t3rm1n4l/go-mega/mega.go | 431 ++++++++++++++---- .../github.com/t3rm1n4l/go-mega/mega_test.go | 403 ---------------- .../github.com/t3rm1n4l/go-mega/messages.go | 36 +- vendor/github.com/t3rm1n4l/go-mega/utils.go | 170 ++++--- .../github.com/t3rm1n4l/go-mega/utils_test.go | 105 ----- vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 ++ vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 ++++ vendor/modules.txt | 9 + 23 files changed, 698 insertions(+), 1053 deletions(-) delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 vendor/github.com/t3rm1n4l/go-humanize/bytes_test.go delete mode 100644 vendor/github.com/t3rm1n4l/go-humanize/comma_test.go delete mode 100644 vendor/github.com/t3rm1n4l/go-humanize/ordinals_test.go delete mode 100644 vendor/github.com/t3rm1n4l/go-humanize/times_test.go create mode 100644 vendor/github.com/t3rm1n4l/go-mega/LICENSE delete mode 100644 vendor/github.com/t3rm1n4l/go-mega/mega_test.go delete mode 100644 vendor/github.com/t3rm1n4l/go-mega/utils_test.go create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/modules.txt diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index d14072e..0000000 --- a/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/t3rm1n4l/go-humanize" - packages = ["."] - revision = "e7ed15be05eb554fbaa83ac9b335556d6390fb9f" - -[[projects]] - branch = "master" - name = "github.com/t3rm1n4l/go-mega" - packages = ["."] - revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "92e551f65fcf7bb6495bdd56c208e3d557b2e2900fa684113072688b6080b661" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 799a31f..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,30 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/t3rm1n4l/go-humanize" - -[[constraint]] - branch = "master" - name = "github.com/t3rm1n4l/go-mega" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a6de18c --- /dev/null +++ b/go.mod @@ -0,0 +1,10 @@ +module github.com/t3rm1n4l/megacmd + +go 1.18 + +require ( + github.com/t3rm1n4l/go-humanize v0.0.0-20121229052809-e7ed15be05eb + github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf +) + +require golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 // indirect diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..27c0727 --- /dev/null +++ b/go.sum @@ -0,0 +1,6 @@ +github.com/t3rm1n4l/go-humanize v0.0.0-20121229052809-e7ed15be05eb h1:Y3ZZPZq8LFM+Hd3jIMAB9hdUvqMgUYoPL+An9kj9m8I= +github.com/t3rm1n4l/go-humanize v0.0.0-20121229052809-e7ed15be05eb/go.mod h1:RKcvJ+wnT/VTNZ92WydAGo1jX2nN3AuHqWmD2zTysF0= +github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf h1:Y43S3e9P1NPs/QF4R5/SdlXj2d31540hP4Gk8VKNvDg= +github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf/go.mod h1:c+cGNU1qi9bO7ZF4IRMYk+KaZTNiQ/gQrSbyMmGFq1Q= +golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 h1:MQ/ZZiDsUapFFiMS+vzwXkCTeEKaum+Do5rINYJDmxc= +golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= diff --git a/vendor/github.com/t3rm1n4l/go-humanize/bytes_test.go b/vendor/github.com/t3rm1n4l/go-humanize/bytes_test.go deleted file mode 100644 index f4bd28d..0000000 --- a/vendor/github.com/t3rm1n4l/go-humanize/bytes_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package humanize - -import ( - "testing" -) - -func assert(t *testing.T, name string, got interface{}, expected interface{}) { - if got != expected { - t.Errorf("Expected %#v for %s, got %#v", expected, name, got) - } -} - -func TestByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. 
- {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - } - - for _, p := range tests { - got, err := ParseBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } - if got != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } -} - -func TestBytes(t *testing.T) { - assert(t, "bytes(0)", Bytes(0), "0B") - assert(t, "bytes(1)", Bytes(1), "1B") - assert(t, "bytes(803)", Bytes(803), "803B") - assert(t, "bytes(999)", Bytes(999), "999B") -} - -func TestK(t *testing.T) { - assert(t, "bytes(1024)", Bytes(1024), "1.0KB") - assert(t, "bytes(1MB - 1)", Bytes(MByte-Byte), "1000KB") -} - -func TestM(t *testing.T) { - assert(t, "bytes(1MB)", Bytes(1024*1024), "1.0MB") - assert(t, "bytes(1GB - 1K)", Bytes(GByte-KByte), "1000MB") -} - -func TestG(t *testing.T) { - assert(t, "bytes(1GB)", Bytes(GByte), "1.0GB") - assert(t, "bytes(1TB - 1M)", Bytes(TByte-MByte), "1000GB") -} - -func TestT(t *testing.T) { - assert(t, "bytes(1TB)", Bytes(TByte), "1.0TB") - assert(t, "bytes(1PB - 1T)", Bytes(PByte-TByte), "999TB") -} - -func TestP(t *testing.T) { - assert(t, "bytes(1PB)", Bytes(PByte), "1.0PB") - assert(t, "bytes(1PB - 1T)", Bytes(EByte-PByte), "999PB") -} - -func TestE(t *testing.T) { - assert(t, "bytes(1EB)", Bytes(EByte), "1.0EB") - // Overflows. - // assert(t, "bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB") -} - -func TestIIBytes(t *testing.T) { - assert(t, "bytes(0)", IBytes(0), "0B") - assert(t, "bytes(1)", IBytes(1), "1B") - assert(t, "bytes(803)", IBytes(803), "803B") - assert(t, "bytes(1023)", IBytes(1023), "1023B") -} - -func TestIK(t *testing.T) { - assert(t, "bytes(1024)", IBytes(1024), "1.0KiB") - assert(t, "bytes(1MB - 1)", IBytes(MiByte-IByte), "1024KiB") -} - -func TestIM(t *testing.T) { - assert(t, "bytes(1MB)", IBytes(1024*1024), "1.0MiB") - assert(t, "bytes(1GB - 1K)", IBytes(GiByte-KiByte), "1024MiB") -} - -func TestIG(t *testing.T) { - assert(t, "bytes(1GB)", IBytes(GiByte), "1.0GiB") - assert(t, "bytes(1TB - 1M)", IBytes(TiByte-MiByte), "1024GiB") -} - -func TestIT(t *testing.T) { - assert(t, "bytes(1TB)", IBytes(TiByte), "1.0TiB") - assert(t, "bytes(1PB - 1T)", IBytes(PiByte-TiByte), "1023TiB") -} - -func TestIP(t *testing.T) { - assert(t, "bytes(1PB)", IBytes(PiByte), "1.0PiB") - assert(t, "bytes(1PB - 1T)", IBytes(EiByte-PiByte), "1023PiB") -} - -func TestIE(t *testing.T) { - assert(t, "bytes(1EiB)", IBytes(EiByte), "1.0EiB") - // Overflows. 
- // assert(t, "bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB") -} - -func TestIHalf(t *testing.T) { - assert(t, "bytes(5.5GiB)", IBytes(5.5*GiByte), "5.5GiB") -} - -func TestHalf(t *testing.T) { - assert(t, "bytes(5.5GB)", Bytes(5.5*GByte), "5.5GB") -} diff --git a/vendor/github.com/t3rm1n4l/go-humanize/comma_test.go b/vendor/github.com/t3rm1n4l/go-humanize/comma_test.go deleted file mode 100644 index bf94bdb..0000000 --- a/vendor/github.com/t3rm1n4l/go-humanize/comma_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestReverse(t *testing.T) { - assert(t, "", reverse(""), "") - assert(t, "1", reverse("1"), "1") - assert(t, "12", reverse("12"), "21") - assert(t, "123", reverse("123"), "321") - assert(t, "1234", reverse("1234"), "4321") -} - -func TestCommas(t *testing.T) { - assert(t, "0", Comma(0), "0") - assert(t, "10", Comma(10), "10") - assert(t, "100", Comma(100), "100") - assert(t, "1,000", Comma(1000), "1,000") - assert(t, "10,000", Comma(10000), "10,000") - assert(t, "10,000,000", Comma(10000000), "10,000,000") - assert(t, "-10,000,000", Comma(-10000000), "-10,000,000") - assert(t, "-10,000", Comma(-10000), "-10,000") - assert(t, "-1,000", Comma(-1000), "-1,000") - assert(t, "-100", Comma(-100), "-100") - assert(t, "-10", Comma(-10), "-10") -} diff --git a/vendor/github.com/t3rm1n4l/go-humanize/ordinals_test.go b/vendor/github.com/t3rm1n4l/go-humanize/ordinals_test.go deleted file mode 100644 index 717c41d..0000000 --- a/vendor/github.com/t3rm1n4l/go-humanize/ordinals_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestOrdinals(t *testing.T) { - assert(t, "0", Ordinal(0), "0th") - assert(t, "1", Ordinal(1), "1st") - assert(t, "2", Ordinal(2), "2nd") - assert(t, "3", Ordinal(3), "3rd") - assert(t, "4", Ordinal(4), "4th") - assert(t, "10", Ordinal(10), "10th") - assert(t, "11", Ordinal(11), "11th") - assert(t, "12", Ordinal(12), "12th") - assert(t, "13", Ordinal(13), "13th") - assert(t, "101", Ordinal(101), "101st") - assert(t, "102", Ordinal(102), "102nd") - assert(t, "103", Ordinal(103), "103rd") -} diff --git a/vendor/github.com/t3rm1n4l/go-humanize/times_test.go b/vendor/github.com/t3rm1n4l/go-humanize/times_test.go deleted file mode 100644 index 2c7385e..0000000 --- a/vendor/github.com/t3rm1n4l/go-humanize/times_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package humanize - -import ( - "testing" - "time" -) - -func checkTime(t *testing.T, expected, got string) { - if got != expected { - t.Fatalf("Expected %s, got %s", expected, got) - } -} - -func TestPast(t *testing.T) { - - expected := []string{ - "now", - "1 second ago", - "12 seconds ago", - "30 seconds ago", - "45 seconds ago", - "15 minutes ago", - "2 hours ago", - "21 hours ago", - "1 day ago", - "2 days ago", - "3 days ago", - "1 week ago", - "1 week ago", - "2 weeks ago", - "1 month ago", - "1 year ago", - } - - i := 0 - now := time.Now().Unix() - - checkTime(t, expected[i], Time(time.Unix(now, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-1, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-12, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-30, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-45, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-15*Minute, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-2*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-21*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-26*Hour, 0))) - i++ - checkTime(t, 
expected[i], Time(time.Unix(now-49*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-3*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-7*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-12*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-15*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-39*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now-365*Day, 0))) -} - -func TestFuture(t *testing.T) { - - expected := []string{ - "now", - "1 second from now", - "12 seconds from now", - "30 seconds from now", - "45 seconds from now", - "15 minutes from now", - "2 hours from now", - "21 hours from now", - "1 day from now", - "2 days from now", - "3 days from now", - "1 week from now", - "1 week from now", - "2 weeks from now", - "1 month from now", - "1 year from now", - } - - i := 0 - now := time.Now().Unix() - - checkTime(t, expected[i], Time(time.Unix(now, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+1, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+12, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+30, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+45, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+15*Minute, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+2*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+21*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+26*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+49*Hour, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+3*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+7*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+12*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+15*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+39*Day, 0))) - i++ - checkTime(t, expected[i], Time(time.Unix(now+365*Day, 0))) -} diff --git a/vendor/github.com/t3rm1n4l/go-mega/.travis.yml b/vendor/github.com/t3rm1n4l/go-mega/.travis.yml index dda4077..323ac8a 100644 --- a/vendor/github.com/t3rm1n4l/go-mega/.travis.yml +++ b/vendor/github.com/t3rm1n4l/go-mega/.travis.yml @@ -4,13 +4,12 @@ osx_image: xcode7.3 os: - linux go: -- 1.7.6 -- 1.8.7 -- 1.9.5 -- "1.10.1" +- 1.11.x +- 1.12.x - tip install: - make build_dep +- go get -u ./... script: - make check - make test @@ -19,7 +18,7 @@ matrix: - go: tip include: - os: osx - go: "1.10.1" + go: "1.12.x" env: global: - secure: RzsF80V1i69FVJwKSF8WrFzk5bRUKtPxRkhjiLOO0b1usFg0EIY6XFp3s/VTR6oT91LRXml3Bp7wHHrkPvGnHyUyuxj6loj3gIrsX8cZHUtjyQX/Szfi9MOJpbdJvfCcHByEh9YGldAz//9zvEo5oGuI29Luur3cv+BJNJElmHg= diff --git a/vendor/github.com/t3rm1n4l/go-mega/LICENSE b/vendor/github.com/t3rm1n4l/go-mega/LICENSE new file mode 100644 index 0000000..776b559 --- /dev/null +++ b/vendor/github.com/t3rm1n4l/go-mega/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Sarath Lakshman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/t3rm1n4l/go-mega/README.md b/vendor/github.com/t3rm1n4l/go-mega/README.md index f5755f1..fb8161d 100644 --- a/vendor/github.com/t3rm1n4l/go-mega/README.md +++ b/vendor/github.com/t3rm1n4l/go-mega/README.md @@ -23,7 +23,7 @@ This is an API client library for MEGA storage service. Currently, the library s ### API methods -Please find full doc at [http://godoc.org/github.com/t3rm1n4l/go-mega](http://godoc.org/github.com/t3rm1n4l/go-mega) +Please find full doc at [https://pkg.go.dev/github.com/t3rm1n4l/go-mega](https://pkg.go.dev/github.com/t3rm1n4l/go-mega) ### Testing diff --git a/vendor/github.com/t3rm1n4l/go-mega/errors.go b/vendor/github.com/t3rm1n4l/go-mega/errors.go index 343ea7c..2c02fb5 100644 --- a/vendor/github.com/t3rm1n4l/go-mega/errors.go +++ b/vendor/github.com/t3rm1n4l/go-mega/errors.go @@ -20,18 +20,25 @@ var ( EEXPIRED = errors.New("The upload target URL you are trying to access has expired. Please request a fresh one") // Filesystem/Account errors - ENOENT = errors.New("Object (typically, node or user) not found") - ECIRCULAR = errors.New("Circular linkage attempted") - EACCESS = errors.New("Access violation") - EEXIST = errors.New("Trying to create an object that already exists") - EINCOMPLETE = errors.New("Trying to access an incomplete resource") - EKEY = errors.New("A decryption operation failed") - ESID = errors.New("Invalid or expired user session, please relogin") - EBLOCKED = errors.New("User blocked") - EOVERQUOTA = errors.New("Request over quota") - ETEMPUNAVAIL = errors.New("Resource temporarily not available, please try again later") - EMACMISMATCH = errors.New("MAC verification failed") - EBADATTR = errors.New("Bad node attribute") + ENOENT = errors.New("Object (typically, node or user) not found") + ECIRCULAR = errors.New("Circular linkage attempted") + EACCESS = errors.New("Access violation") + EEXIST = errors.New("Trying to create an object that already exists") + EINCOMPLETE = errors.New("Trying to access an incomplete resource") + EKEY = errors.New("A decryption operation failed") + ESID = errors.New("Invalid or expired user session, please relogin") + EBLOCKED = errors.New("User blocked") + EOVERQUOTA = errors.New("Request over quota") + ETEMPUNAVAIL = errors.New("Resource temporarily not available, please try again later") + EMACMISMATCH = errors.New("MAC verification failed") + EBADATTR = errors.New("Bad node attribute") + ETOOMANYCONNECTIONS = errors.New("Too many connections on this resource.") + EWRITE = errors.New("File could not be written to (or failed post-write integrity check).") + EREAD = errors.New("File could not be read from (or changed unexpectedly during reading).") + EAPPKEY = errors.New("Invalid or missing application key.") + ESSL = errors.New("SSL verification failed") + EGOINGOVERQUOTA = errors.New("Not enough quota") + EMFAREQUIRED = errors.New("Multi-factor authentication required") // Config errors EWORKER_LIMIT_EXCEEDED = errors.New("Maximum worker limit exceeded") @@ -79,6 +86,20 @@ func parseError(errno ErrorMsg) error { return 
EOVERQUOTA case errno == -18: return ETEMPUNAVAIL + case errno == -19: + return ETOOMANYCONNECTIONS + case errno == -20: + return EWRITE + case errno == -21: + return EREAD + case errno == -22: + return EAPPKEY + case errno == -23: + return ESSL + case errno == -24: + return EGOINGOVERQUOTA + case errno == -26: + return EMFAREQUIRED } return fmt.Errorf("Unknown mega error %d", errno) diff --git a/vendor/github.com/t3rm1n4l/go-mega/mega.go b/vendor/github.com/t3rm1n4l/go-mega/mega.go index 81f992e..30e9a3c 100644 --- a/vendor/github.com/t3rm1n4l/go-mega/mega.go +++ b/vendor/github.com/t3rm1n4l/go-mega/mega.go @@ -5,6 +5,7 @@ import ( "crypto/aes" "crypto/cipher" "crypto/rand" + "crypto/sha512" "encoding/json" "errors" "fmt" @@ -19,6 +20,8 @@ import ( "strings" "sync" "time" + + "golang.org/x/crypto/pbkdf2" ) // Default settings @@ -93,12 +96,16 @@ func (c *config) SetUploadWorkers(w int) error { type Mega struct { config + // Version of the account + accountVersion int + // Salt for the account if accountVersion > 1 + accountSalt []byte // Sequence number sn int64 // Server state sn ssn string // Session ID - sid []byte + sid string // Master key k []byte // User handle @@ -329,7 +336,10 @@ func newMegaFS() *MegaFS { func New() *Mega { max := big.NewInt(0x100000000) - bigx, _ := rand.Int(rand.Reader, max) + bigx, err := rand.Int(rand.Reader, max) + if err != nil { + panic(err) // this should be returned, but this is a public interface + } cfg := newConfig() mgfs := newMegaFS() m := &Mega{ @@ -397,8 +407,8 @@ func (m *Mega) api_request(r []byte) (buf []byte, err error) { url := fmt.Sprintf("%s/cs?id=%d", m.baseurl, m.sn) - if m.sid != nil { - url = fmt.Sprintf("%s&sid=%s", url, string(m.sid)) + if m.sid != "" { + url = fmt.Sprintf("%s&sid=%s", url, m.sid) } sleepTime := minSleepTime // inital backoff time @@ -457,25 +467,92 @@ func (m *Mega) api_request(r []byte) (buf []byte, err error) { return nil, err } +// prelogin call +func (m *Mega) prelogin(email string) error { + var msg [1]PreloginMsg + var res [1]PreloginResp + + email = strings.ToLower(email) // mega uses lowercased emails for login purposes - FIXME is this true for prelogin? 
+ + msg[0].Cmd = "us0" + msg[0].User = email + + req, err := json.Marshal(msg) + if err != nil { + return err + } + result, err := m.api_request(req) + if err != nil { + return err + } + + err = json.Unmarshal(result, &res) + if err != nil { + return err + } + + if res[0].Version == 0 { + return errors.New("prelogin: no version returned") + } else if res[0].Version > 2 { + return fmt.Errorf("prelogin: version %d account not supported", res[0].Version) + } else if res[0].Version == 2 { + if len(res[0].Salt) == 0 { + return errors.New("prelogin: no salt returned") + } + m.accountSalt, err = base64urldecode(res[0].Salt) + if err != nil { + return err + } + } + m.accountVersion = res[0].Version + + return nil +} + // Authenticate and start a session -func (m *Mega) Login(email string, passwd string) error { +func (m *Mega) login(email string, passwd string) error { var msg [1]LoginMsg var res [1]LoginResp var err error var result []byte - passkey := password_key(passwd) - uhandle := stringhash(email, passkey) + email = strings.ToLower(email) // mega uses lowercased emails for login purposes + + passkey, err := password_key(passwd) + if err != nil { + return err + } + uhandle, err := stringhash(email, passkey) + if err != nil { + return err + } m.uh = make([]byte, len(uhandle)) copy(m.uh, uhandle) msg[0].Cmd = "us" msg[0].User = email - msg[0].Handle = string(uhandle) + if m.accountVersion == 1 { + msg[0].Handle = uhandle + } else { + const derivedKeyLength = 2 * aes.BlockSize + derivedKey := pbkdf2.Key([]byte(passwd), m.accountSalt, 100000, derivedKeyLength, sha512.New) + authKey := derivedKey[aes.BlockSize:] + passkey = derivedKey[:aes.BlockSize] - req, _ := json.Marshal(msg) - result, err = m.api_request(req) + sessionKey := make([]byte, aes.BlockSize) + _, err = rand.Read(sessionKey) + if err != nil { + return err + } + msg[0].Handle = base64urlencode(authKey) + msg[0].SessionKey = base64urlencode(sessionKey) + } + req, err := json.Marshal(msg) + if err != nil { + return err + } + result, err = m.api_request(req) if err != nil { return err } @@ -485,10 +562,30 @@ func (m *Mega) Login(email string, passwd string) error { return err } - m.k = base64urldecode([]byte(res[0].Key)) + m.k, err = base64urldecode(res[0].Key) + if err != nil { + return err + } cipher, err := aes.NewCipher(passkey) + if err != nil { + return err + } cipher.Decrypt(m.k, m.k) - m.sid, err = decryptSessionId([]byte(res[0].Privk), []byte(res[0].Csid), m.k) + m.sid, err = decryptSessionId(res[0].Privk, res[0].Csid, m.k) + if err != nil { + return err + } + return nil +} + +// Authenticate and start a session +func (m *Mega) Login(email string, passwd string) error { + err := m.prelogin(email) + if err != nil { + return err + } + + err = m.login(email, passwd) if err != nil { return err } @@ -557,9 +654,11 @@ func (m *Mega) GetUser() (UserResp, error) { msg[0].Cmd = "ug" - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return res[0], err + } result, err := m.api_request(req) - if err != nil { return res[0], err } @@ -577,7 +676,10 @@ func (m *Mega) GetQuota() (QuotaResp, error) { msg[0].Xfer = 1 msg[0].Strg = 1 - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return res[0], err + } result, err := m.api_request(req) if err != nil { return res[0], err @@ -594,65 +696,120 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) { var node, parent *Node var err error - master_aes, _ := aes.NewCipher(m.k) + master_aes, err := aes.NewCipher(m.k) + if err != nil { + 
return nil, err + } switch { case itm.T == FOLDER || itm.T == FILE: args := strings.Split(itm.Key, ":") + if len(args) < 2 { + return nil, fmt.Errorf("not enough : in item.Key: %q", itm.Key) + } + itemUser, itemKey := args[0], args[1] + itemKeyParts := strings.Split(itemKey, "/") + if len(itemKeyParts) >= 2 { + itemKey = itemKeyParts[0] + // the other part is maybe a share key handle? + } switch { // File or folder owned by current user - case args[0] == itm.User: - buf := base64urldecode([]byte(args[1])) + case itemUser == itm.User: + buf, err := base64urldecode(itemKey) + if err != nil { + return nil, err + } err = blockDecrypt(master_aes, buf, buf) if err != nil { return nil, err } - compkey = bytes_to_a32(buf) + compkey, err = bytes_to_a32(buf) + if err != nil { + return nil, err + } // Shared folder case itm.SUser != "" && itm.SKey != "": - sk := base64urldecode([]byte(itm.SKey)) + sk, err := base64urldecode(itm.SKey) + if err != nil { + return nil, err + } err = blockDecrypt(master_aes, sk, sk) if err != nil { return nil, err } - sk_aes, _ := aes.NewCipher(sk) + sk_aes, err := aes.NewCipher(sk) + if err != nil { + return nil, err + } m.FS.skmap[itm.Hash] = itm.SKey - buf := base64urldecode([]byte(args[1])) + buf, err := base64urldecode(itemKey) + if err != nil { + return nil, err + } err = blockDecrypt(sk_aes, buf, buf) if err != nil { return nil, err } - compkey = bytes_to_a32(buf) + compkey, err = bytes_to_a32(buf) + if err != nil { + return nil, err + } // Shared file default: - k := m.FS.skmap[args[0]] - b := base64urldecode([]byte(k)) + k, ok := m.FS.skmap[itemUser] + if !ok { + return nil, errors.New("couldn't find decryption key for shared file") + } + b, err := base64urldecode(k) + if err != nil { + return nil, err + } err = blockDecrypt(master_aes, b, b) if err != nil { return nil, err } - block, _ := aes.NewCipher(b) - buf := base64urldecode([]byte(args[1])) + block, err := aes.NewCipher(b) + if err != nil { + return nil, err + } + buf, err := base64urldecode(itemKey) + if err != nil { + return nil, err + } err = blockDecrypt(block, buf, buf) if err != nil { return nil, err } - compkey = bytes_to_a32(buf) + compkey, err = bytes_to_a32(buf) + if err != nil { + return nil, err + } } switch { case itm.T == FILE: + if len(compkey) < 8 { + m.logf("ignoring item: compkey too short (%d): %#v", len(compkey), itm) + return nil, nil + } key = []uint32{compkey[0] ^ compkey[4], compkey[1] ^ compkey[5], compkey[2] ^ compkey[6], compkey[3] ^ compkey[7]} default: key = compkey } - attr, err = decryptAttr(a32_to_bytes(key), []byte(itm.Attr)) - // FIXME: + bkey, err := a32_to_bytes(key) if err != nil { + // FIXME: attr.Name = "BAD ATTRIBUTE" + } else { + attr, err = decryptAttr(bkey, itm.Attr) + // FIXME: + if err != nil { + attr.Name = "BAD ATTRIBUTE" + } } } @@ -692,15 +849,33 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) { switch { case itm.T == FILE: var meta NodeMeta - meta.key = a32_to_bytes(key) - meta.iv = a32_to_bytes([]uint32{compkey[4], compkey[5], 0, 0}) - meta.mac = a32_to_bytes([]uint32{compkey[6], compkey[7]}) - meta.compkey = a32_to_bytes(compkey) + meta.key, err = a32_to_bytes(key) + if err != nil { + return nil, err + } + meta.iv, err = a32_to_bytes([]uint32{compkey[4], compkey[5], 0, 0}) + if err != nil { + return nil, err + } + meta.mac, err = a32_to_bytes([]uint32{compkey[6], compkey[7]}) + if err != nil { + return nil, err + } + meta.compkey, err = a32_to_bytes(compkey) + if err != nil { + return nil, err + } node.meta = meta case itm.T == FOLDER: var meta 
NodeMeta - meta.key = a32_to_bytes(key) - meta.compkey = a32_to_bytes(compkey) + meta.key, err = a32_to_bytes(key) + if err != nil { + return nil, err + } + meta.compkey, err = a32_to_bytes(compkey) + if err != nil { + return nil, err + } node.meta = meta case itm.T == ROOT: attr.Name = "Cloud Drive" @@ -737,9 +912,11 @@ func (m *Mega) getFileSystem() error { msg[0].Cmd = "f" msg[0].C = 1 - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return err + } result, err := m.api_request(req) - if err != nil { return err } @@ -756,7 +933,8 @@ func (m *Mega) getFileSystem() error { for _, itm := range res[0].F { _, err = m.addFSNode(itm) if err != nil { - return err + m.debugf("couldn't decode FSNode %#v: %v ", itm, err) + continue } } @@ -817,7 +995,12 @@ func (m *Mega) NewDownload(src *Node) (*Download, error) { return nil, err } - _, err = decryptAttr(key, []byte(res[0].Attr)) + // DownloadResp has an embedded error in it for some reason + if res[0].Err != 0 { + return nil, parseError(res[0].Err) + } + + _, err = decryptAttr(key, res[0].Attr) if err != nil { return nil, err } @@ -831,9 +1014,15 @@ func (m *Mega) NewDownload(src *Node) (*Download, error) { mac_enc := cipher.NewCBCEncrypter(aes_block, zero_iv) m.FS.mutex.Lock() - t := bytes_to_a32(src.meta.iv) + t, err := bytes_to_a32(src.meta.iv) m.FS.mutex.Unlock() - iv := a32_to_bytes([]uint32{t[0], t[1], t[0], t[1]}) + if err != nil { + return nil, err + } + iv, err := a32_to_bytes([]uint32{t[0], t[1], t[0], t[1]}) + if err != nil { + return nil, err + } d := &Download{ m: m, @@ -915,10 +1104,17 @@ func (d *Download) DownloadChunk(id int) (chunk []byte, err error) { } // Decrypt the block - ctr_iv := bytes_to_a32(d.src.meta.iv) + ctr_iv, err := bytes_to_a32(d.src.meta.iv) + if err != nil { + return nil, err + } ctr_iv[2] = uint32(uint64(chk_start) / 0x1000000000) ctr_iv[3] = uint32(chk_start / 0x10) - ctr_aes := cipher.NewCTR(d.aes_block, a32_to_bytes(ctr_iv)) + bctr_iv, err := a32_to_bytes(ctr_iv) + if err != nil { + return nil, err + } + ctr_aes := cipher.NewCTR(d.aes_block, bctr_iv) ctr_aes.XORKeyStream(chunk, chunk) // Update the chunk_macs @@ -958,8 +1154,15 @@ func (d *Download) Finish() (err error) { d.mac_enc.CryptBlocks(mac_data, v) } - tmac := bytes_to_a32(mac_data) - if bytes.Equal(a32_to_bytes([]uint32{tmac[0] ^ tmac[1], tmac[2] ^ tmac[3]}), d.src.meta.mac) == false { + tmac, err := bytes_to_a32(mac_data) + if err != nil { + return err + } + btmac, err := a32_to_bytes([]uint32{tmac[0] ^ tmac[1], tmac[2] ^ tmac[3]}) + if err != nil { + return err + } + if bytes.Equal(btmac, d.src.meta.mac) == false { return EMACMISMATCH } @@ -1111,12 +1314,24 @@ func (m *Mega) NewUpload(parent *Node, name string, fileSize int64) (*Upload, er } - kbytes := a32_to_bytes(ukey[:4]) - kiv := a32_to_bytes([]uint32{ukey[4], ukey[5], 0, 0}) - aes_block, _ := aes.NewCipher(kbytes) + kbytes, err := a32_to_bytes(ukey[:4]) + if err != nil { + return nil, err + } + kiv, err := a32_to_bytes([]uint32{ukey[4], ukey[5], 0, 0}) + if err != nil { + return nil, err + } + aes_block, err := aes.NewCipher(kbytes) + if err != nil { + return nil, err + } mac_enc := cipher.NewCBCEncrypter(aes_block, zero_iv) - iv := a32_to_bytes([]uint32{ukey[4], ukey[5], ukey[4], ukey[5]}) + iv, err := a32_to_bytes([]uint32{ukey[4], ukey[5], ukey[4], ukey[5]}) + if err != nil { + return nil, err + } chunks := getChunkSizes(fileSize) @@ -1166,10 +1381,17 @@ func (u *Upload) UploadChunk(id int, chunk []byte) (err error) { if len(chunk) != chk_size { return 
errors.New("upload chunk is wrong size") } - ctr_iv := bytes_to_a32(u.kiv) + ctr_iv, err := bytes_to_a32(u.kiv) + if err != nil { + return err + } ctr_iv[2] = uint32(uint64(chk_start) / 0x1000000000) ctr_iv[3] = uint32(chk_start / 0x10) - ctr_aes := cipher.NewCTR(u.aes_block, a32_to_bytes(ctr_iv)) + bctr_iv, err := a32_to_bytes(ctr_iv) + if err != nil { + return err + } + ctr_aes := cipher.NewCTR(u.aes_block, bctr_iv) enc := cipher.NewCBCEncrypter(u.aes_block, u.iv) @@ -1247,7 +1469,10 @@ func (u *Upload) Finish() (node *Node, err error) { u.mac_enc.CryptBlocks(mac_data, v) } - t := bytes_to_a32(mac_data) + t, err := bytes_to_a32(mac_data) + if err != nil { + return nil, err + } meta_mac := []uint32{t[0] ^ t[1], t[2] ^ t[3]} attr := FileAttr{u.name} @@ -1261,7 +1486,10 @@ func (u *Upload) Finish() (node *Node, err error) { u.ukey[2] ^ meta_mac[0], u.ukey[3] ^ meta_mac[1], u.ukey[4], u.ukey[5], meta_mac[0], meta_mac[1]} - buf := a32_to_bytes(key) + buf, err := a32_to_bytes(key) + if err != nil { + return nil, err + } master_aes, err := aes.NewCipher(u.m.k) if err != nil { return nil, err @@ -1278,8 +1506,8 @@ func (u *Upload) Finish() (node *Node, err error) { cmsg[0].T = u.parenthash cmsg[0].N[0].H = string(u.completion_handle) cmsg[0].N[0].T = FILE - cmsg[0].N[0].A = string(attr_data) - cmsg[0].N[0].K = string(base64urlencode(buf)) + cmsg[0].N[0].A = attr_data + cmsg[0].N[0].K = base64urlencode(buf) request, err := json.Marshal(cmsg) if err != nil { @@ -1301,7 +1529,7 @@ func (u *Upload) Finish() (node *Node, err error) { } // Upload a file to the filesystem -func (m *Mega) UploadFile(srcpath string, parent *Node, name string, progress *chan int) (*Node, error) { +func (m *Mega) UploadFile(srcpath string, parent *Node, name string, progress *chan int) (node *Node, err error) { defer func() { if progress != nil { close(*progress) @@ -1320,6 +1548,12 @@ func (m *Mega) UploadFile(srcpath string, parent *Node, name string, progress *c if err != nil { return nil, err } + defer func() { + e := infile.Close() + if err == nil { + err = e + } + }() if name == "" { name = filepath.Base(srcpath) @@ -1411,9 +1645,11 @@ func (m *Mega) Move(src *Node, parent *Node) error { return err } - request, _ := json.Marshal(msg) + request, err := json.Marshal(msg) + if err != nil { + return err + } _, err = m.api_request(request) - if err != nil { return err } @@ -1438,30 +1674,42 @@ func (m *Mega) Rename(src *Node, name string) error { } var msg [1]FileAttrMsg - master_aes, _ := aes.NewCipher(m.k) + master_aes, err := aes.NewCipher(m.k) + if err != nil { + return err + } attr := FileAttr{name} - attr_data, _ := encryptAttr(src.meta.key, attr) + attr_data, err := encryptAttr(src.meta.key, attr) + if err != nil { + return err + } key := make([]byte, len(src.meta.compkey)) - err := blockEncrypt(master_aes, key, src.meta.compkey) + err = blockEncrypt(master_aes, key, src.meta.compkey) if err != nil { return err } msg[0].Cmd = "a" - msg[0].Attr = string(attr_data) - msg[0].Key = string(base64urlencode(key)) + msg[0].Attr = attr_data + msg[0].Key = base64urlencode(key) msg[0].N = src.hash msg[0].I, err = randString(10) if err != nil { return err } - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return err + } _, err = m.api_request(req) + if err != nil { + return err + } src.name = name - return err + return nil } // Create a directory in the filesystem @@ -1480,12 +1728,21 @@ func (m *Mega) CreateDir(name string, parent *Node) (*Node, error) { compkey[i] = uint32(mrand.Int31()) } - 
master_aes, _ := aes.NewCipher(m.k) + master_aes, err := aes.NewCipher(m.k) + if err != nil { + return nil, err + } attr := FileAttr{name} - ukey := a32_to_bytes(compkey[:4]) - attr_data, _ := encryptAttr(ukey, attr) + ukey, err := a32_to_bytes(compkey[:4]) + if err != nil { + return nil, err + } + attr_data, err := encryptAttr(ukey, attr) + if err != nil { + return nil, err + } key := make([]byte, len(ukey)) - err := blockEncrypt(master_aes, key, ukey) + err = blockEncrypt(master_aes, key, ukey) if err != nil { return nil, err } @@ -1494,14 +1751,17 @@ func (m *Mega) CreateDir(name string, parent *Node) (*Node, error) { msg[0].T = parent.hash msg[0].N[0].H = "xxxxxxxx" msg[0].N[0].T = FOLDER - msg[0].N[0].A = string(attr_data) - msg[0].N[0].K = string(base64urlencode(key)) + msg[0].N[0].A = attr_data + msg[0].N[0].K = base64urlencode(key) msg[0].I, err = randString(10) if err != nil { return nil, err } - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return nil, err + } result, err := m.api_request(req) if err != nil { return nil, err @@ -1537,14 +1797,20 @@ func (m *Mega) Delete(node *Node, destroy bool) error { return err } - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return err + } _, err = m.api_request(req) + if err != nil { + return err + } parent := m.FS.lookup[node.hash] parent.removeChild(node) delete(m.FS.lookup, node.hash) - return err + return nil } // process an add node event @@ -1579,7 +1845,10 @@ func (m *Mega) processUpdateNode(evRaw []byte) error { } node := m.FS.hashLookup(ev.N) - attr, err := decryptAttr(node.meta.key, []byte(ev.Attr)) + if node == nil { + return ENOENT + } + attr, err := decryptAttr(node.meta.key, ev.Attr) if err == nil { node.name = attr.Name } else { @@ -1623,7 +1892,7 @@ func (m *Mega) pollEvents() { sleepTime = minSleepTime } - url := fmt.Sprintf("%s/sc?sn=%s&sid=%s", m.baseurl, m.ssn, string(m.sid)) + url := fmt.Sprintf("%s/sc?sn=%s&sid=%s", m.baseurl, m.ssn, m.sid) resp, err = m.client.Post(url, "application/xml", nil) if err != nil { m.logf("pollEvents: Error fetching status: %s", err) @@ -1753,9 +2022,11 @@ func (m *Mega) getLink(n *Node) (string, error) { msg[0].Cmd = "l" msg[0].N = n.GetHash() - req, _ := json.Marshal(msg) + req, err := json.Marshal(msg) + if err != nil { + return "", err + } result, err := m.api_request(req) - if err != nil { return "", err } @@ -1774,7 +2045,7 @@ func (m *Mega) Link(n *Node, includeKey bool) (string, error) { } if includeKey { m.FS.mutex.Lock() - key := string(base64urlencode(n.meta.compkey)) + key := base64urlencode(n.meta.compkey) m.FS.mutex.Unlock() return fmt.Sprintf("%v/#!%v!%v", BASE_DOWNLOAD_URL, id, key), nil } else { diff --git a/vendor/github.com/t3rm1n4l/go-mega/mega_test.go b/vendor/github.com/t3rm1n4l/go-mega/mega_test.go deleted file mode 100644 index 30a5556..0000000 --- a/vendor/github.com/t3rm1n4l/go-mega/mega_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package mega - -import ( - "crypto/md5" - "crypto/rand" - "fmt" - "io/ioutil" - "os" - "path" - "sync" - "testing" - "time" -) - -var USER string = os.Getenv("MEGA_USER") -var PASSWORD string = os.Getenv("MEGA_PASSWD") - -// retry runs fn until it succeeds, using what to log and retrying on -// EAGAIN. 
It uses exponential backoff -func retry(t *testing.T, what string, fn func() error) { - const maxTries = 10 - var err error - sleep := 100 * time.Millisecond - for i := 1; i <= maxTries; i++ { - err = fn() - if err == nil { - return - } - if err != EAGAIN { - break - } - t.Logf("%s failed %d/%d - retrying after %v sleep", what, i, maxTries, sleep) - time.Sleep(sleep) - sleep *= 2 - } - t.Fatalf("%s failed: %v", what, err) -} - -func skipIfNoCredentials(t *testing.T) { - if USER == "" || PASSWORD == "" { - t.Skip("MEGA_USER and MEGA_PASSWD not set - skipping integration tests") - } -} - -func initSession(t *testing.T) *Mega { - skipIfNoCredentials(t) - m := New() - // m.SetDebugger(log.Printf) - retry(t, "Login", func() error { - return m.Login(USER, PASSWORD) - }) - return m -} - -// createFile creates a temporary file of a given size along with its MD5SUM -func createFile(t *testing.T, size int64) (string, string) { - b := make([]byte, size) - _, err := rand.Read(b) - if err != nil { - t.Fatalf("Error reading rand: %v", err) - } - file, err := ioutil.TempFile("/tmp/", "gomega-") - if err != nil { - t.Fatalf("Error creating temp file: %v", err) - } - _, err = file.Write(b) - if err != nil { - t.Fatalf("Error writing temp file: %v", err) - } - h := md5.New() - _, err = h.Write(b) - if err != nil { - t.Fatalf("Error on Write while writing temp file: %v", err) - } - return file.Name(), fmt.Sprintf("%x", h.Sum(nil)) -} - -// uploadFile uploads a temporary file of a given size returning the -// node, name and its MD5SUM -func uploadFile(t *testing.T, session *Mega, size int64, parent *Node) (node *Node, name string, md5sum string) { - name, md5sum = createFile(t, size) - defer func() { - _ = os.Remove(name) - }() - var err error - retry(t, fmt.Sprintf("Upload %q", name), func() error { - node, err = session.UploadFile(name, parent, "", nil) - return err - }) - if node == nil { - t.Fatalf("Failed to obtain node after upload for %q", name) - } - return node, name, md5sum -} - -// createDir creates a directory under parent -func createDir(t *testing.T, session *Mega, name string, parent *Node) (node *Node) { - var err error - retry(t, fmt.Sprintf("Create directory %q", name), func() error { - node, err = session.CreateDir(name, parent) - return err - }) - return node -} - -func fileMD5(t *testing.T, name string) string { - file, err := os.Open(name) - if err != nil { - t.Fatalf("Failed to open %q: %v", name, err) - } - b, err := ioutil.ReadAll(file) - if err != nil { - t.Fatalf("Failed to read all %q: %v", name, err) - } - h := md5.New() - _, err = h.Write(b) - if err != nil { - t.Fatalf("Error on hash in fileMD5: %v", err) - } - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func TestLogin(t *testing.T) { - skipIfNoCredentials(t) - - m := New() - retry(t, "Login", func() error { - return m.Login(USER, PASSWORD) - }) -} - -func TestGetUser(t *testing.T) { - session := initSession(t) - _, err := session.GetUser() - if err != nil { - t.Fatal("GetUser failed", err) - } -} - -func TestUploadDownload(t *testing.T) { - session := initSession(t) - node, name, h1 := uploadFile(t, session, 314573, session.FS.root) - - session.FS.mutex.Lock() - phash := session.FS.root.hash - n := session.FS.lookup[node.hash] - if n.parent.hash != phash { - t.Error("Parent of uploaded file mismatch") - } - session.FS.mutex.Unlock() - - err := session.DownloadFile(node, name, nil) - if err != nil { - t.Fatal("Download failed", err) - } - - h2 := fileMD5(t, name) - err = os.Remove(name) - if err != nil { - t.Error("Failed to 
remove file", err) - } - - if h1 != h2 { - t.Error("MD5 mismatch for downloaded file") - } -} - -func TestMove(t *testing.T) { - session := initSession(t) - node, _, _ := uploadFile(t, session, 31, session.FS.root) - - hash := node.hash - phash := session.FS.trash.hash - err := session.Move(node, session.FS.trash) - if err != nil { - t.Fatal("Move failed", err) - } - - session.FS.mutex.Lock() - n := session.FS.lookup[hash] - if n.parent.hash != phash { - t.Error("Move happened to wrong parent", phash, n.parent.hash) - } - session.FS.mutex.Unlock() -} - -func TestRename(t *testing.T) { - session := initSession(t) - node, _, _ := uploadFile(t, session, 31, session.FS.root) - - err := session.Rename(node, "newname.txt") - if err != nil { - t.Fatal("Rename failed", err) - } - - session.FS.mutex.Lock() - newname := session.FS.lookup[node.hash].name - if newname != "newname.txt" { - t.Error("Renamed to wrong name", newname) - } - session.FS.mutex.Unlock() -} - -func TestDelete(t *testing.T) { - session := initSession(t) - node, _, _ := uploadFile(t, session, 31, session.FS.root) - - retry(t, "Soft delete", func() error { - return session.Delete(node, false) - }) - - session.FS.mutex.Lock() - node = session.FS.lookup[node.hash] - if node.parent != session.FS.trash { - t.Error("Expects file to be moved to trash") - } - session.FS.mutex.Unlock() - - retry(t, "Hard delete", func() error { - return session.Delete(node, true) - }) - - time.Sleep(1 * time.Second) // wait for the event - - session.FS.mutex.Lock() - if _, ok := session.FS.lookup[node.hash]; ok { - t.Error("Expects file to be dissapeared") - } - session.FS.mutex.Unlock() -} - -func TestCreateDir(t *testing.T) { - session := initSession(t) - node := createDir(t, session, "testdir1", session.FS.root) - node2 := createDir(t, session, "testdir2", node) - - session.FS.mutex.Lock() - nnode2 := session.FS.lookup[node2.hash] - if nnode2.parent.hash != node.hash { - t.Error("Wrong directory parent") - } - session.FS.mutex.Unlock() -} - -func TestConfig(t *testing.T) { - skipIfNoCredentials(t) - - m := New() - m.SetAPIUrl("http://invalid.domain") - err := m.Login(USER, PASSWORD) - if err == nil { - t.Error("API Url: Expected failure") - } - - err = m.SetDownloadWorkers(100) - if err != EWORKER_LIMIT_EXCEEDED { - t.Error("Download: Expected EWORKER_LIMIT_EXCEEDED error") - } - - err = m.SetUploadWorkers(100) - if err != EWORKER_LIMIT_EXCEEDED { - t.Error("Upload: Expected EWORKER_LIMIT_EXCEEDED error") - } - - // TODO: Add timeout test cases - -} - -func TestPathLookup(t *testing.T) { - session := initSession(t) - - rs, err := randString(5) - if err != nil { - t.Fatalf("failed to make random string: %v", err) - } - node1 := createDir(t, session, "dir-1-"+rs, session.FS.root) - node21 := createDir(t, session, "dir-2-1-"+rs, node1) - node22 := createDir(t, session, "dir-2-2-"+rs, node1) - node31 := createDir(t, session, "dir-3-1-"+rs, node21) - node32 := createDir(t, session, "dir-3-2-"+rs, node22) - _ = node32 - - _, name1, _ := uploadFile(t, session, 31, node31) - _, _, _ = uploadFile(t, session, 31, node31) - _, name3, _ := uploadFile(t, session, 31, node22) - - testpaths := [][]string{ - {"dir-1-" + rs, "dir-2-2-" + rs, path.Base(name3)}, - {"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs}, - {"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs, path.Base(name1)}, - {"dir-1-" + rs, "dir-2-1-" + rs, "none"}, - } - - results := []error{nil, nil, nil, ENOENT} - - for i, tst := range testpaths { - ns, e := session.FS.PathLookup(session.FS.root, tst) - 
switch { - case e != results[i]: - t.Errorf("Test %d failed: wrong result", i) - default: - if results[i] == nil && len(tst) != len(ns) { - t.Errorf("Test %d failed: result array len (%d) mismatch", i, len(ns)) - - } - - arr := []string{} - for n := range ns { - if tst[n] != ns[n].name { - t.Errorf("Test %d failed: result node mismatches (%v) and (%v)", i, tst, arr) - break - } - arr = append(arr, tst[n]) - } - } - } -} - -func TestEventNotify(t *testing.T) { - session1 := initSession(t) - session2 := initSession(t) - - node, _, _ := uploadFile(t, session1, 31, session1.FS.root) - - for i := 0; i < 60; i++ { - time.Sleep(time.Second * 1) - node = session2.FS.HashLookup(node.GetHash()) - if node != nil { - break - } - } - - if node == nil { - t.Fatal("Expects file to found in second client's FS") - } - - retry(t, "Delete", func() error { - return session2.Delete(node, true) - }) - - time.Sleep(time.Second * 5) - node = session1.FS.HashLookup(node.hash) - if node != nil { - t.Fatal("Expects file to not-found in first client's FS") - } -} - -func TestExportLink(t *testing.T) { - session := initSession(t) - node, _, _ := uploadFile(t, session, 31, session.FS.root) - - // Don't include decryption key - retry(t, "Failed to export link (key not included)", func() error { - _, err := session.Link(node, false) - return err - }) - - // Do include decryption key - retry(t, "Failed to export link (key included)", func() error { - _, err := session.Link(node, true) - return err - }) -} - -func TestWaitEvents(t *testing.T) { - m := &Mega{} - m.SetLogger(t.Logf) - m.SetDebugger(t.Logf) - var wg sync.WaitGroup - // in the background fire the event timer after 100mS - wg.Add(1) - go func() { - time.Sleep(100 * time.Millisecond) - m.waitEventsFire() - wg.Done() - }() - wait := func(d time.Duration, pb *bool) { - e := m.WaitEventsStart() - *pb = m.WaitEvents(e, d) - wg.Done() - } - // wait for each event in a separate goroutine - var b1, b2, b3 bool - wg.Add(3) - go wait(10*time.Second, &b1) - go wait(2*time.Second, &b2) - go wait(1*time.Millisecond, &b3) - wg.Wait() - if b1 != false { - t.Errorf("Unexpected timeout for b1") - } - if b2 != false { - t.Errorf("Unexpected timeout for b2") - } - if b3 != true { - t.Errorf("Unexpected event for b3") - } - if m.waitEvents != nil { - t.Errorf("Expecting waitEvents to be empty") - } - // Check nothing happens if we fire the event with no listeners - m.waitEventsFire() -} diff --git a/vendor/github.com/t3rm1n4l/go-mega/messages.go b/vendor/github.com/t3rm1n4l/go-mega/messages.go index da3639f..8b60c1a 100644 --- a/vendor/github.com/t3rm1n4l/go-mega/messages.go +++ b/vendor/github.com/t3rm1n4l/go-mega/messages.go @@ -2,16 +2,32 @@ package mega import "encoding/json" +type PreloginMsg struct { + Cmd string `json:"a"` + User string `json:"user"` +} + +type PreloginResp struct { + Version int `json:"v"` + Salt string `json:"s"` +} + type LoginMsg struct { - Cmd string `json:"a"` - User string `json:"user"` - Handle string `json:"uh"` + Cmd string `json:"a"` + User string `json:"user"` + Handle string `json:"uh"` + SessionKey string `json:"sek,omitempty"` + Si string `json:"si,omitempty"` + Mfa string `json:"mfa,omitempty"` } type LoginResp struct { - Csid string `json:"csid"` - Privk string `json:"privk"` - Key string `json:"k"` + Csid string `json:"csid"` + Privk string `json:"privk"` + Key string `json:"k"` + Ach int `json:"ach"` + SessionKey string `json:"sek"` + U string `json:"u"` } type UserMsg struct { @@ -104,10 +120,10 @@ type DownloadMsg struct { } type 
DownloadResp struct { - G string `json:"g"` - Size uint64 `json:"s"` - Attr string `json:"at"` - Err uint32 `json:"e"` + G string `json:"g"` + Size uint64 `json:"s"` + Attr string `json:"at"` + Err ErrorMsg `json:"e"` } type UploadMsg struct { diff --git a/vendor/github.com/t3rm1n4l/go-mega/utils.go b/vendor/github.com/t3rm1n4l/go-mega/utils.go index 959dc5d..c279818 100644 --- a/vendor/github.com/t3rm1n4l/go-mega/utils.go +++ b/vendor/github.com/t3rm1n4l/go-mega/utils.go @@ -37,73 +37,69 @@ func newHttpClient(timeout time.Duration) *http.Client { // bytes_to_a32 converts the byte slice b to uint32 slice considering // the bytes to be in big endian order. -func bytes_to_a32(b []byte) []uint32 { +func bytes_to_a32(b []byte) ([]uint32, error) { length := len(b) + 3 a := make([]uint32, length/4) buf := bytes.NewBuffer(b) for i, _ := range a { - _ = binary.Read(buf, binary.BigEndian, &a[i]) + err := binary.Read(buf, binary.BigEndian, &a[i]) + if err != nil { + return nil, err + } } - return a + return a, nil } // a32_to_bytes converts the uint32 slice a to byte slice where each // uint32 is decoded in big endian order. -func a32_to_bytes(a []uint32) []byte { +func a32_to_bytes(a []uint32) ([]byte, error) { buf := new(bytes.Buffer) buf.Grow(len(a) * 4) // To prevent reallocations in Write for _, v := range a { - _ = binary.Write(buf, binary.BigEndian, v) + err := binary.Write(buf, binary.BigEndian, v) + if err != nil { + return nil, err + } } - return buf.Bytes() + return buf.Bytes(), nil } -// base64urlencode encodes byte slice b using base64 url encoding. -// It removes `=` padding when necessary -func base64urlencode(b []byte) []byte { - enc := base64.URLEncoding - encSize := enc.EncodedLen(len(b)) - buf := make([]byte, encSize) - enc.Encode(buf, b) - - paddSize := 3 - len(b)%3 - if paddSize < 3 { - encSize -= paddSize - buf = buf[:encSize] - } - - return buf +// base64urlencode encodes byte slice b using base64 url encoding +// without `=` padding. +func base64urlencode(b []byte) string { + return base64.RawURLEncoding.EncodeToString(b) } -// base64urldecode decodes the byte slice b using base64 url decoding. -// It adds required '=' padding before decoding. -func base64urldecode(b []byte) []byte { - enc := base64.URLEncoding - padSize := 4 - len(b)%4 - - switch padSize { - case 1: - b = append(b, '=') - case 2: - b = append(b, '=', '=') - } - - decSize := enc.DecodedLen(len(b)) - buf := make([]byte, decSize) - n, _ := enc.Decode(buf, b) - return buf[:n] +// base64urldecode decodes the byte slice b using unpadded base64 url +// decoding. It also allows the characters from standard base64 to be +// compatible with the mega decoder. +func base64urldecode(s string) ([]byte, error) { + enc := base64.RawURLEncoding + // mega base64 decoder accepts the characters from both URLEncoding and StdEncoding + // though nearly all strings are URL encoded + s = strings.Replace(s, "+", "-", -1) + s = strings.Replace(s, "/", "_", -1) + return enc.DecodeString(s) } // base64_to_a32 converts base64 encoded byte slice b to uint32 slice. -func base64_to_a32(b []byte) []uint32 { - return bytes_to_a32(base64urldecode(b)) +func base64_to_a32(s string) ([]uint32, error) { + d, err := base64urldecode(s) + if err != nil { + return nil, err + } + return bytes_to_a32(d) } // a32_to_base64 converts uint32 slice to base64 encoded byte slice. 
-func a32_to_base64(a []uint32) []byte { - return base64urlencode(a32_to_bytes(a)) +func a32_to_base64(a []uint32) (string, error) { + d, err := a32_to_bytes(a) + if err != nil { + return "", err + } + return base64urlencode(d), nil } // paddnull pads byte slice b such that the size of resulting byte @@ -121,10 +117,16 @@ func paddnull(b []byte, q int) []byte { } // password_key calculates password hash from the user password. -func password_key(p string) []byte { - a := bytes_to_a32(paddnull([]byte(p), 4)) +func password_key(p string) ([]byte, error) { + a, err := bytes_to_a32(paddnull([]byte(p), 4)) + if err != nil { + return nil, err + } - pkey := a32_to_bytes([]uint32{0x93C467E3, 0x7DB0C7A4, 0xD1BE3F81, 0x0152CB56}) + pkey, err := a32_to_bytes([]uint32{0x93C467E3, 0x7DB0C7A4, 0xD1BE3F81, 0x0152CB56}) + if err != nil { + return nil, err + } n := (len(a) + 3) / 4 @@ -137,7 +139,14 @@ func password_key(p string) []byte { key[k] = a[k+j] } } - ciphers[j/4], _ = aes.NewCipher(a32_to_bytes(key)) // Uses AES in ECB mode + bkey, err := a32_to_bytes(key) + if err != nil { + return nil, err + } + ciphers[j/4], err = aes.NewCipher(bkey) // Uses AES in ECB mode + if err != nil { + return nil, err + } } for i := 65536; i > 0; i-- { @@ -146,24 +155,36 @@ func password_key(p string) []byte { } } - return pkey + return pkey, nil } // stringhash computes generic string hash. Uses k as the key for AES // cipher. -func stringhash(s string, k []byte) []byte { - a := bytes_to_a32(paddnull([]byte(s), 4)) +func stringhash(s string, k []byte) (string, error) { + a, err := bytes_to_a32(paddnull([]byte(s), 4)) + if err != nil { + return "", err + } h := []uint32{0, 0, 0, 0} for i, v := range a { h[i&3] ^= v } - hb := a32_to_bytes(h) - cipher, _ := aes.NewCipher(k) + hb, err := a32_to_bytes(h) + if err != nil { + return "", err + } + cipher, err := aes.NewCipher(k) + if err != nil { + return "", err + } for i := 16384; i > 0; i-- { cipher.Encrypt(hb, hb) } - ha := bytes_to_a32(paddnull(hb, 4)) + ha, err := bytes_to_a32(paddnull(hb, 4)) + if err != nil { + return "", err + } return a32_to_base64([]uint32{ha[0], ha[2]}) } @@ -230,16 +251,25 @@ func blockEncrypt(blk cipher.Block, dst, src []byte) error { // decryptSeessionId decrypts the session id using the given private // key. 
-func decryptSessionId(privk []byte, csid []byte, mk []byte) ([]byte, error) { +func decryptSessionId(privk string, csid string, mk []byte) (string, error) { - block, _ := aes.NewCipher(mk) - pk := base64urldecode(privk) - err := blockDecrypt(block, pk, pk) + block, err := aes.NewCipher(mk) if err != nil { - return nil, err + return "", err + } + pk, err := base64urldecode(privk) + if err != nil { + return "", err + } + err = blockDecrypt(block, pk, pk) + if err != nil { + return "", err } - c := base64urldecode(csid) + c, err := base64urldecode(csid) + if err != nil { + return "", err + } m, _ := getMPI(c) @@ -277,16 +307,23 @@ func getChunkSizes(size int64) (chunks []chunkSize) { var attrMatch = regexp.MustCompile(`{".*"}`) -func decryptAttr(key []byte, data []byte) (attr FileAttr, err error) { +func decryptAttr(key []byte, data string) (attr FileAttr, err error) { err = EBADATTR block, err := aes.NewCipher(key) if err != nil { return attr, err } - iv := a32_to_bytes([]uint32{0, 0, 0, 0}) + iv, err := a32_to_bytes([]uint32{0, 0, 0, 0}) + if err != nil { + return attr, err + } mode := cipher.NewCBCDecrypter(block, iv) buf := make([]byte, len(data)) - mode.CryptBlocks(buf, base64urldecode([]byte(data))) + ddata, err := base64urldecode(data) + if err != nil { + return attr, err + } + mode.CryptBlocks(buf, ddata) if string(buf[:4]) == "MEGA" { str := strings.TrimRight(string(buf[4:]), "\x00") @@ -299,21 +336,24 @@ func decryptAttr(key []byte, data []byte) (attr FileAttr, err error) { return attr, err } -func encryptAttr(key []byte, attr FileAttr) (b []byte, err error) { +func encryptAttr(key []byte, attr FileAttr) (b string, err error) { err = EBADATTR block, err := aes.NewCipher(key) if err != nil { - return nil, err + return "", err } data, err := json.Marshal(attr) if err != nil { - return nil, err + return "", err } attrib := []byte("MEGA") attrib = append(attrib, data...) 
attrib = paddnull(attrib, 16) - iv := a32_to_bytes([]uint32{0, 0, 0, 0}) + iv, err := a32_to_bytes([]uint32{0, 0, 0, 0}) + if err != nil { + return "", err + } mode := cipher.NewCBCEncrypter(block, iv) mode.CryptBlocks(attrib, attrib) diff --git a/vendor/github.com/t3rm1n4l/go-mega/utils_test.go b/vendor/github.com/t3rm1n4l/go-mega/utils_test.go deleted file mode 100644 index 5330376..0000000 --- a/vendor/github.com/t3rm1n4l/go-mega/utils_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package mega - -import ( - "reflect" - "testing" -) - -func TestGetChunkSizes(t *testing.T) { - const k = 1024 - for _, test := range []struct { - size int64 - want []chunkSize - }{ - { - size: 0, - want: []chunkSize(nil), - }, - { - size: 1, - want: []chunkSize{ - {0, 1}, - }, - }, - { - size: 128*k - 1, - want: []chunkSize{ - {0, 128*k - 1}, - }, - }, - { - size: 128 * k, - want: []chunkSize{ - {0, 128 * k}, - }, - }, - { - size: 128*k + 1, - want: []chunkSize{ - {0, 128 * k}, - {128 * k, 1}, - }, - }, - { - size: 384*k - 1, - want: []chunkSize{ - {0, 128 * k}, - {128 * k, 256*k - 1}, - }, - }, - { - size: 384 * k, - want: []chunkSize{ - {0, 128 * k}, - {128 * k, 256 * k}, - }, - }, - { - size: 384*k + 1, - want: []chunkSize{ - {0, 128 * k}, - {128 * k, 256 * k}, - {384 * k, 1}, - }, - }, - { - size: 5 * k * k, - want: []chunkSize{ - {0, 128 * k}, - {128 * k, 256 * k}, - {384 * k, 384 * k}, - {768 * k, 512 * k}, - {1280 * k, 640 * k}, - {1920 * k, 768 * k}, - {2688 * k, 896 * k}, - {3584 * k, 1024 * k}, - {4608 * k, 512 * k}, - }, - }, - { - size: 10 * k * k, - want: []chunkSize{ - {0, 128 * k}, - {128 * k, 256 * k}, - {384 * k, 384 * k}, - {768 * k, 512 * k}, - {1280 * k, 640 * k}, - {1920 * k, 768 * k}, - {2688 * k, 896 * k}, - {3584 * k, 1024 * k}, - {4608 * k, 1024 * k}, - {5632 * k, 1024 * k}, - {6656 * k, 1024 * k}, - {7680 * k, 1024 * k}, - {8704 * k, 1024 * k}, - {9728 * k, 512 * k}, - }, - }, - } { - got := getChunkSizes(test.size) - if !reflect.DeepEqual(test.want, got) { - t.Errorf("incorrect chunks for size %d: want %#v, got %#v", test.size, test.want, got) - - } - } -} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000..593f653 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. 
+ +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 0000000..ebd54aa --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,9 @@ +# github.com/t3rm1n4l/go-humanize v0.0.0-20121229052809-e7ed15be05eb +## explicit +github.com/t3rm1n4l/go-humanize +# github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf +## explicit; go 1.13 +github.com/t3rm1n4l/go-mega +# golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613 +## explicit +golang.org/x/crypto/pbkdf2 From 49d99589998470f4353f3a296d675423fd572d58 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 31 Jan 2023 00:10:20 -0800 Subject: [PATCH 2/2] updated the Makefile so it uses go.mod which is now mandatory, but now the makefile should work straight away without issue --- Makefile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 4d4690f..c882878 100644 --- a/Makefile +++ b/Makefile @@ -24,9 +24,10 @@ release: # Get the build dependencies build_dep: - go get -u github.com/kisielk/errcheck - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/golang/lint/golint + go mod tidy + #go get -u github.com/kisielk/errcheck + #go get -u golang.org/x/tools/cmd/goimports + #go get -u github.com/golang/lint/golint # Do source code quality checks check: