From bb3d94987bb56347194c532042e727ea62a3373e Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Wed, 19 Jun 2019 19:26:33 -0400
Subject: [PATCH 1/8] Prepare for creating wasm builds
---
.travis.yml | 2 +
Makefile | 6 +
README.md | 47 ++-
karma.conf.js => karma-gopher.conf.js | 4 +-
karma-wasm.conf.js | 37 ++
package.json | 5 +-
tests/cipher-wasm.spec.ts | 200 +++++++++++
tests/wasm_exec.js | 465 ++++++++++++++++++++++++++
wasm/skycoin.go | 172 ++++++++++
9 files changed, 924 insertions(+), 14 deletions(-)
rename karma.conf.js => karma-gopher.conf.js (94%)
create mode 100644 karma-wasm.conf.js
create mode 100644 tests/cipher-wasm.spec.ts
create mode 100644 tests/wasm_exec.js
create mode 100644 wasm/skycoin.go
diff --git a/.travis.yml b/.travis.yml
index fc1db2c..71497ff 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,7 +14,9 @@ install:
script:
- make check
- make build-js-min
+ - make build-wasm
- make test-suite-ts
+ - make test-suite-ts-wasm
notifications:
# https://github.com/kvld/travisci-telegram TravisCI Telegram Bot integration
diff --git a/Makefile b/Makefile
index d2b07f0..02e3a1e 100644
--- a/Makefile
+++ b/Makefile
@@ -11,6 +11,9 @@ build-js-min: ## Build /skycoin/skycoin.go. The result is minified and saved in
go build -o gopherjs-tool vendor/github.com/gopherjs/gopherjs/tool.go
GOOS=linux ./gopherjs-tool build skycoin/skycoin.go -m
+build-wasm: ## Build /wasm/skycoin.go. The result is saved in the repo root as skycoin-lite.wasm
+ GOOS=js GOARCH=wasm go build -o skycoin-lite.wasm ./wasm/skycoin.go
+
test-js: ## Run the Go tests using JavaScript
go build -o gopherjs-tool vendor/github.com/gopherjs/gopherjs/tool.go
./gopherjs-tool test ./skycoin/ -v
@@ -21,6 +24,9 @@ test-suite-ts: ## Run the ts version of the cipher test suite. Use a small numbe
test-suite-ts-extensive: ## Run the ts version of the cipher test suite. All the test cases
npm run test-extensive
+test-suite-ts-wasm: ## Run the ts version of the cipher test suite for wasm
+ npm run test-wasm
+
test:
go test ./... -timeout=10m -cover
diff --git a/README.md b/README.md
index d9fe9eb..f08abf6 100644
--- a/README.md
+++ b/README.md
@@ -2,13 +2,14 @@
# Skycoin Liteclient
-This repository contains a small wrapper for Skycoin (written in Go) to provide mobile and JS bindings.
+This repository contains a small wrapper for Skycoin (written in Go) to provide mobile, JS and wasm bindings.
At the moment it is used to compile
an [Android Archive](https://developer.android.com/studio/projects/android-library.html), an iOS Framework,
-and a JS library with [gopherjs](https://github.com/gopherjs/gopherjs).
+a (deprecated) JS library with [gopherjs](https://github.com/gopherjs/gopherjs) and a wasm file.
-Supports go1.10+.
+Supports go1.10+. However, for compiling the wasm file you must use Go v1.12.x (compatibility
+with Go v1.13+ is not guaranteed).
## Important note about error handling
@@ -33,15 +34,26 @@ $ gomobile bind -target=ios github.com/skycoin/skycoin-lite/mobile
## Compile javascript library
+> IMPORTANT: the ability to transpile the code to a JavaScript library is deprecated due to important
+performance issues and should not be used. Please compile to a wasm file instead.
+
For the compilation process to javascript library, we use [gopherjs](https://github.com/gopherjs/gopherjs).
To compile the library use `make build-js` or `make build-js-min` (if you want the final file to be minified).
-After compiling, the main.js and main.js.map files will be created/updated in the root of the repository.
+After compiling, the `main.js` and `main.js.map` files will be created/updated in the root of the repository.
+
+## Compile wasm file
+
+> IMPORTANT: you need Go v1.12.x to use this function. It is not guaranteed to work with Go v1.13+.
+
+To compile the wasm file use `make build-wasm`. After compiling, the `skycoin-lite.wasm` file will be
+created/updated in the root of the repository.
## Development
-The javascript library is created starting from [gopher/main.go](gopher/main.go). The Android/iOS library is
-created starting from [mobile/api.go](mobile/api.go).
+The javascript library is created starting from [skycoin/skycoin.go](skycoin/skycoin.go). The wasm file is
+created starting from [wasm/skycoin.go](wasm/skycoin.go). The Android/iOS library is created starting
+from [mobile/api.go](mobile/api.go).
### Running tests
@@ -67,22 +79,37 @@ npm install --global source-map-support
and make sure `NODE_PATH` is set to the value of `npm root --global` in your environment.
-#### TS cipher test suite
+#### TS cipher test suite for GopherJS
+
+> IMPORTANT: the ability to transpile the code to a JavaScript library is deprecated due to important
+performance issues and should not be used. Please compile to a wasm file instead.
The repository includes a TypeScript version of the cipher test suite, originally written in Go in
-the Skycoin main repository. Because the tests take a significant amount of time to complete in
-JavaScript/TypeScript, the test suite can be run with a limited number of cases with
+the Skycoin main repository. It test the result of the GopherJS version of the library, so before
+using it you must compile the GopherJS version with `make build-js` or `make build-js-min`.
+Because testing the GopherJS version takes a significant amount of time to complete, the test suite
+can be run with a limited number of cases with:
```sh
make test-suite-ts
```
-The test suite can be run with all test cases using
+The test suite can be run with all test cases using:
```sh
make test-suite-ts-extensive
```
+#### TS cipher test suite for wasm
+
+There is test suite for the wasm version of the library, just like there is one for the GopherJS
+version. Before using it you must compile the wasm version with `make build-wasm`. the test suite
+can be run with:
+
+```sh
+make test-suite-ts-wasm
+```
+
### Formatting
All `.go` source files should be formatted `goimports`. You can do this with:
diff --git a/karma.conf.js b/karma-gopher.conf.js
similarity index 94%
rename from karma.conf.js
rename to karma-gopher.conf.js
index ad68559..a0fda54 100644
--- a/karma.conf.js
+++ b/karma-gopher.conf.js
@@ -20,9 +20,9 @@ module.exports = function (config) {
require('karma-typescript')
],
files: [
- 'tests/*.spec.ts',
+ 'tests/cipher.spec.ts',
{ pattern: 'tests/test-fixtures/*.golden', included: false },
- { pattern: 'tests/*.ts', included: true },
+ { pattern: 'tests/utils.ts', included: true },
{ pattern: 'skycoin.js', included: true }
],
preprocessors: {
diff --git a/karma-wasm.conf.js b/karma-wasm.conf.js
new file mode 100644
index 0000000..3e8b3bc
--- /dev/null
+++ b/karma-wasm.conf.js
@@ -0,0 +1,37 @@
+// Karma configuration file, see link for more information
+// https://karma-runner.github.io/0.13/config/configuration-file.html
+
+module.exports = function (config) {
+
+ config.set({
+ basePath: '',
+ frameworks: ['jasmine', 'karma-typescript'],
+ plugins: [
+ require('karma-jasmine'),
+ require('karma-chrome-launcher'),
+ require('karma-jasmine-html-reporter'),
+ require('karma-read-json'),
+ require('karma-typescript')
+ ],
+ files: [
+ 'tests/cipher-wasm.spec.ts',
+ { pattern: 'tests/test-fixtures/*.golden', included: false },
+ { pattern: 'skycoin-lite.wasm', included: false },
+ { pattern: 'tests/utils.ts', included: true },
+ { pattern: 'tests/wasm_exec.js', included: true },
+ ],
+ preprocessors: {
+ "**/*.ts": "karma-typescript"
+ },
+ client: {
+ clearContext: false // leave Jasmine Spec Runner output visible in browser
+ },
+ reporters: ['progress', 'kjhtml', 'karma-typescript'],
+ port: 9876,
+ colors: true,
+ logLevel: config.LOG_INFO,
+ autoWatch: true,
+ browsers: ['ChromeHeadless', 'Chrome'],
+ singleRun: false
+ });
+};
diff --git a/package.json b/package.json
index 4718faf..5b99633 100644
--- a/package.json
+++ b/package.json
@@ -2,8 +2,9 @@
"name": "skycoin-lite",
"version": "0.0.0",
"scripts": {
- "test": "karma start karma.conf.js -sm=false --single-run --browsers ChromeHeadless --browserNoActivityTimeout=180000",
- "test-extensive": "karma start karma.conf.js -sm=false --single-run --browsers ChromeHeadless --browserNoActivityTimeout=180000 --mode 1"
+ "test": "karma start karma-gopher.conf.js -sm=false --single-run --browsers ChromeHeadless --browserNoActivityTimeout=180000",
+ "test-extensive": "karma start karma-gopher.conf.js -sm=false --single-run --browsers ChromeHeadless --browserNoActivityTimeout=180000 --mode 1",
+ "test-wasm": "karma start karma-wasm.conf.js -sm=false --single-run --browsers ChromeHeadless --browserNoActivityTimeout=180000"
},
"private": true,
"devDependencies": {
diff --git a/tests/cipher-wasm.spec.ts b/tests/cipher-wasm.spec.ts
new file mode 100644
index 0000000..e2dead3
--- /dev/null
+++ b/tests/cipher-wasm.spec.ts
@@ -0,0 +1,200 @@
+import { readJSON } from 'karma-read-json';
+
+import { Address, testCases, convertAsciiToHexa } from './utils'
+
+declare var Go: any;
+
+describe('CipherProvider Lib', () => {
+ const fixturesPath = 'tests/test-fixtures/';
+ const addressesFileName = 'many-addresses.golden';
+ const inputHashesFileName = 'input-hashes.golden';
+
+ const seedSignaturesFiles = [
+ 'seed-0000.golden', 'seed-0001.golden', 'seed-0002.golden',
+ 'seed-0003.golden', 'seed-0004.golden', 'seed-0005.golden',
+ 'seed-0006.golden', 'seed-0007.golden', 'seed-0008.golden',
+ 'seed-0009.golden', 'seed-0010.golden'
+ ];
+
+ const testSettings = { addressCount: 1000, seedFilesCount: 11 };
+
+ describe('initialize', () => {
+ it('should initialize', done => {
+ const go = new Go();
+
+ fetch('base/skycoin-lite.wasm').then(response => {
+ response.arrayBuffer().then(ab => {
+ const go = new Go();
+ window['WebAssembly'].instantiate(ab, go.importObject).then(result => {
+ go.run(result.instance);
+
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ describe('generate address', () => {
+ const addressFixtureFile = readJSON(fixturesPath + addressesFileName);
+ const expectedAddresses = addressFixtureFile.keys.slice(0, testSettings.addressCount);
+ let seed = convertAsciiToHexa(atob(addressFixtureFile.seed));
+ let generatedAddress;
+
+ testCases(expectedAddresses, (address: any) => {
+ it('should generate many address correctly', done => {
+ generatedAddress = generateAddress(seed);
+ seed = generatedAddress.next_seed;
+
+ const convertedAddress = {
+ address: generatedAddress.address,
+ public: generatedAddress.public_key,
+ secret: generatedAddress.secret_key
+ };
+
+ expect(convertedAddress).toEqual(address);
+ done();
+ });
+
+ it('should pass the verification', done => {
+ verifyAddress(generatedAddress);
+ done();
+ });
+ });
+ });
+
+ describe('seed signatures', () => {
+ const inputHashes = readJSON(fixturesPath + inputHashesFileName).hashes;
+
+ testCases(seedSignaturesFiles.slice(0, testSettings.seedFilesCount), (fileName: string) => {
+ describe(`should pass the verification for ${fileName}`, () => {
+ let seedKeys;
+ let actualAddresses;
+ let testData: { signature: string, public_key: string, hash: string, secret_key: string, address: string }[] = [];
+
+ beforeAll(() => {
+ const signaturesFixtureFile = readJSON(fixturesPath + fileName);
+ const seed = convertAsciiToHexa(atob(signaturesFixtureFile.seed));
+ seedKeys = signaturesFixtureFile.keys;
+
+ actualAddresses = generateAddresses(seed, seedKeys);
+ testData = getSeedTestData(inputHashes, seedKeys, actualAddresses);
+ });
+
+ it('should check number of signatures and hashes', done => {
+ const result = seedKeys.some(key => key.signatures.length !== inputHashes.length);
+
+ expect(result).toEqual(false);
+ done();
+ });
+
+ it('should generate many address correctly', done => {
+ actualAddresses.forEach((address, index) => {
+ expect(address.address).toEqual(seedKeys[index].address);
+ expect(address.public_key).toEqual(seedKeys[index].public);
+ expect(address.secret_key).toEqual(seedKeys[index].secret);
+ });
+
+ done();
+ });
+
+ it('address should pass the verification', done => {
+ verifyAddresses(actualAddresses);
+ done();
+ });
+
+ it(`should verify signature correctly`, done => {
+ testData.forEach(data => {
+ const result = window['SkycoinCipherExtras'].verifySignature(data.public_key, data.signature, data.hash);
+ expect(result).toBeNull();
+ done();
+ });
+ });
+
+ it(`should check signature correctly`, done => {
+ testData.forEach(data => {
+ const result = window['SkycoinCipherExtras'].chkSig(data.address, data.hash, data.signature);
+ expect(result).toBeNull();
+ done();
+ });
+ });
+
+ it(`should verify signed hash correctly`, done => {
+ testData.forEach(data => {
+ const result = window['SkycoinCipherExtras'].verifySignedHash(data.signature, data.hash);
+ expect(result).toBeNull();
+ done();
+ });
+ });
+
+ it(`should generate public key correctly`, done => {
+ testData.forEach(data => {
+ const pubKey = window['SkycoinCipherExtras'].pubKeyFromSig(data.signature, data.hash);
+ expect(pubKey).toBeTruthy();
+ expect(pubKey === data.public_key).toBeTruthy();
+ done();
+ });
+ });
+
+ it(`sign hash should be created`, done => {
+ testData.forEach(data => {
+ const sig = window['SkycoinCipherExtras'].signHash(data.hash, data.secret_key);
+ expect(sig).toBeTruthy();
+ done();
+ });
+ });
+ });
+ });
+ });
+});
+
+function getSeedTestData(inputHashes, seedKeys, actualAddresses) {
+ const data = [];
+
+ for (let seedIndex = 0; seedIndex < seedKeys.length; seedIndex++) {
+ for (let hashIndex = 0; hashIndex < inputHashes.length; hashIndex++) {
+ data.push({
+ signature: seedKeys[seedIndex].signatures[hashIndex],
+ public_key: actualAddresses[seedIndex].public_key,
+ secret_key: actualAddresses[seedIndex].secret_key,
+ address: actualAddresses[seedIndex].address,
+ hash: inputHashes[hashIndex]
+ });
+ }
+ }
+
+ return data;
+}
+
+function generateAddresses(seed: string, keys: any[]): Address[] {
+ return keys.map(() => {
+ const generatedAddress = generateAddress(seed);
+ seed = generatedAddress.next_seed;
+
+ return generatedAddress;
+ });
+}
+
+function generateAddress(seed: string): Address {
+ const address = window['SkycoinCipher'].generateAddress(seed);
+ return {
+ address: address.address,
+ public_key: address.public,
+ secret_key: address.secret,
+ next_seed: address.nextSeed
+ };
+}
+
+function verifyAddress(address) {
+ const addressFromPubKey = window['SkycoinCipherExtras'].addressFromPubKey(address.public_key);
+ const addressFromSecKey = window['SkycoinCipherExtras'].addressFromSecKey(address.secret_key);
+
+ expect(addressFromPubKey && addressFromSecKey && addressFromPubKey === addressFromSecKey).toBe(true);
+
+ expect(window['SkycoinCipherExtras'].verifySeckey(address.secret_key)).toBe(null);
+ expect(window['SkycoinCipherExtras'].verifyPubkey(address.public_key)).toBe(null);
+}
+
+function verifyAddresses(addresses) {
+ addresses.forEach(address => verifyAddress(address));
+}
diff --git a/tests/wasm_exec.js b/tests/wasm_exec.js
new file mode 100644
index 0000000..165d567
--- /dev/null
+++ b/tests/wasm_exec.js
@@ -0,0 +1,465 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(() => {
+ if (typeof global !== "undefined") {
+ // global already exists
+ } else if (typeof window !== "undefined") {
+ window.global = window;
+ } else if (typeof self !== "undefined") {
+ self.global = self;
+ } else {
+ throw new Error("cannot export Go (neither global, window nor self is defined)");
+ }
+
+ // Map web browser API and Node.js API to a single common API (preferring web standards over Node.js API).
+ const isNodeJS = global.process && global.process.title === "node";
+ if (isNodeJS) {
+ global.require = require;
+ global.fs = require("fs");
+
+ const nodeCrypto = require("crypto");
+ global.crypto = {
+ getRandomValues(b) {
+ nodeCrypto.randomFillSync(b);
+ },
+ };
+
+ global.performance = {
+ now() {
+ const [sec, nsec] = process.hrtime();
+ return sec * 1000 + nsec / 1000000;
+ },
+ };
+
+ const util = require("util");
+ global.TextEncoder = util.TextEncoder;
+ global.TextDecoder = util.TextDecoder;
+ } else {
+ let outputBuf = "";
+ global.fs = {
+ constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
+ writeSync(fd, buf) {
+ outputBuf += decoder.decode(buf);
+ const nl = outputBuf.lastIndexOf("\n");
+ if (nl != -1) {
+ console.log(outputBuf.substr(0, nl));
+ outputBuf = outputBuf.substr(nl + 1);
+ }
+ return buf.length;
+ },
+ write(fd, buf, offset, length, position, callback) {
+ if (offset !== 0 || length !== buf.length || position !== null) {
+ throw new Error("not implemented");
+ }
+ const n = this.writeSync(fd, buf);
+ callback(null, n);
+ },
+ open(path, flags, mode, callback) {
+ const err = new Error("not implemented");
+ err.code = "ENOSYS";
+ callback(err);
+ },
+ read(fd, buffer, offset, length, position, callback) {
+ const err = new Error("not implemented");
+ err.code = "ENOSYS";
+ callback(err);
+ },
+ fsync(fd, callback) {
+ callback(null);
+ },
+ };
+ }
+
+ const encoder = new TextEncoder("utf-8");
+ const decoder = new TextDecoder("utf-8");
+
+ global.Go = class {
+ constructor() {
+ this.argv = ["js"];
+ this.env = {};
+ this.exit = (code) => {
+ if (code !== 0) {
+ console.warn("exit code:", code);
+ }
+ };
+ this._exitPromise = new Promise((resolve) => {
+ this._resolveExitPromise = resolve;
+ });
+ this._pendingEvent = null;
+ this._scheduledTimeouts = new Map();
+ this._nextCallbackTimeoutID = 1;
+
+ const mem = () => {
+ // The buffer may change when requesting more memory.
+ return new DataView(this._inst.exports.mem.buffer);
+ }
+
+ const setInt64 = (addr, v) => {
+ mem().setUint32(addr + 0, v, true);
+ mem().setUint32(addr + 4, Math.floor(v / 4294967296), true);
+ }
+
+ const getInt64 = (addr) => {
+ const low = mem().getUint32(addr + 0, true);
+ const high = mem().getInt32(addr + 4, true);
+ return low + high * 4294967296;
+ }
+
+ const loadValue = (addr) => {
+ const f = mem().getFloat64(addr, true);
+ if (f === 0) {
+ return undefined;
+ }
+ if (!isNaN(f)) {
+ return f;
+ }
+
+ const id = mem().getUint32(addr, true);
+ return this._values[id];
+ }
+
+ const storeValue = (addr, v) => {
+ const nanHead = 0x7FF80000;
+
+ if (typeof v === "number") {
+ if (isNaN(v)) {
+ mem().setUint32(addr + 4, nanHead, true);
+ mem().setUint32(addr, 0, true);
+ return;
+ }
+ if (v === 0) {
+ mem().setUint32(addr + 4, nanHead, true);
+ mem().setUint32(addr, 1, true);
+ return;
+ }
+ mem().setFloat64(addr, v, true);
+ return;
+ }
+
+ switch (v) {
+ case undefined:
+ mem().setFloat64(addr, 0, true);
+ return;
+ case null:
+ mem().setUint32(addr + 4, nanHead, true);
+ mem().setUint32(addr, 2, true);
+ return;
+ case true:
+ mem().setUint32(addr + 4, nanHead, true);
+ mem().setUint32(addr, 3, true);
+ return;
+ case false:
+ mem().setUint32(addr + 4, nanHead, true);
+ mem().setUint32(addr, 4, true);
+ return;
+ }
+
+ let ref = this._refs.get(v);
+ if (ref === undefined) {
+ ref = this._values.length;
+ this._values.push(v);
+ this._refs.set(v, ref);
+ }
+ let typeFlag = 0;
+ switch (typeof v) {
+ case "string":
+ typeFlag = 1;
+ break;
+ case "symbol":
+ typeFlag = 2;
+ break;
+ case "function":
+ typeFlag = 3;
+ break;
+ }
+ mem().setUint32(addr + 4, nanHead | typeFlag, true);
+ mem().setUint32(addr, ref, true);
+ }
+
+ const loadSlice = (addr) => {
+ const array = getInt64(addr + 0);
+ const len = getInt64(addr + 8);
+ return new Uint8Array(this._inst.exports.mem.buffer, array, len);
+ }
+
+ const loadSliceOfValues = (addr) => {
+ const array = getInt64(addr + 0);
+ const len = getInt64(addr + 8);
+ const a = new Array(len);
+ for (let i = 0; i < len; i++) {
+ a[i] = loadValue(array + i * 8);
+ }
+ return a;
+ }
+
+ const loadString = (addr) => {
+ const saddr = getInt64(addr + 0);
+ const len = getInt64(addr + 8);
+ return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
+ }
+
+ const timeOrigin = Date.now() - performance.now();
+ this.importObject = {
+ go: {
+ // Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
+ // may synchronously trigger a Go event handler. This makes Go code get executed in the middle of the imported
+ // function. A goroutine can switch to a new stack if the current stack is too small (see morestack function).
+ // This changes the SP, thus we have to update the SP used by the imported function.
+
+ // func wasmExit(code int32)
+ "runtime.wasmExit": (sp) => {
+ const code = mem().getInt32(sp + 8, true);
+ this.exited = true;
+ delete this._inst;
+ delete this._values;
+ delete this._refs;
+ this.exit(code);
+ },
+
+ // func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
+ "runtime.wasmWrite": (sp) => {
+ const fd = getInt64(sp + 8);
+ const p = getInt64(sp + 16);
+ const n = mem().getInt32(sp + 24, true);
+ fs.writeSync(fd, new Uint8Array(this._inst.exports.mem.buffer, p, n));
+ },
+
+ // func nanotime() int64
+ "runtime.nanotime": (sp) => {
+ setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000);
+ },
+
+ // func walltime() (sec int64, nsec int32)
+ "runtime.walltime": (sp) => {
+ const msec = (new Date).getTime();
+ setInt64(sp + 8, msec / 1000);
+ mem().setInt32(sp + 16, (msec % 1000) * 1000000, true);
+ },
+
+ // func scheduleTimeoutEvent(delay int64) int32
+ "runtime.scheduleTimeoutEvent": (sp) => {
+ const id = this._nextCallbackTimeoutID;
+ this._nextCallbackTimeoutID++;
+ this._scheduledTimeouts.set(id, setTimeout(
+ () => { this._resume(); },
+ getInt64(sp + 8) + 1, // setTimeout has been seen to fire up to 1 millisecond early
+ ));
+ mem().setInt32(sp + 16, id, true);
+ },
+
+ // func clearTimeoutEvent(id int32)
+ "runtime.clearTimeoutEvent": (sp) => {
+ const id = mem().getInt32(sp + 8, true);
+ clearTimeout(this._scheduledTimeouts.get(id));
+ this._scheduledTimeouts.delete(id);
+ },
+
+ // func getRandomData(r []byte)
+ "runtime.getRandomData": (sp) => {
+ crypto.getRandomValues(loadSlice(sp + 8));
+ },
+
+ // func stringVal(value string) ref
+ "syscall/js.stringVal": (sp) => {
+ storeValue(sp + 24, loadString(sp + 8));
+ },
+
+ // func valueGet(v ref, p string) ref
+ "syscall/js.valueGet": (sp) => {
+ const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16));
+ sp = this._inst.exports.getsp(); // see comment above
+ storeValue(sp + 32, result);
+ },
+
+ // func valueSet(v ref, p string, x ref)
+ "syscall/js.valueSet": (sp) => {
+ Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32));
+ },
+
+ // func valueIndex(v ref, i int) ref
+ "syscall/js.valueIndex": (sp) => {
+ storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16)));
+ },
+
+ // valueSetIndex(v ref, i int, x ref)
+ "syscall/js.valueSetIndex": (sp) => {
+ Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24));
+ },
+
+ // func valueCall(v ref, m string, args []ref) (ref, bool)
+ "syscall/js.valueCall": (sp) => {
+ try {
+ const v = loadValue(sp + 8);
+ const m = Reflect.get(v, loadString(sp + 16));
+ const args = loadSliceOfValues(sp + 32);
+ const result = Reflect.apply(m, v, args);
+ sp = this._inst.exports.getsp(); // see comment above
+ storeValue(sp + 56, result);
+ mem().setUint8(sp + 64, 1);
+ } catch (err) {
+ storeValue(sp + 56, err);
+ mem().setUint8(sp + 64, 0);
+ }
+ },
+
+ // func valueInvoke(v ref, args []ref) (ref, bool)
+ "syscall/js.valueInvoke": (sp) => {
+ try {
+ const v = loadValue(sp + 8);
+ const args = loadSliceOfValues(sp + 16);
+ const result = Reflect.apply(v, undefined, args);
+ sp = this._inst.exports.getsp(); // see comment above
+ storeValue(sp + 40, result);
+ mem().setUint8(sp + 48, 1);
+ } catch (err) {
+ storeValue(sp + 40, err);
+ mem().setUint8(sp + 48, 0);
+ }
+ },
+
+ // func valueNew(v ref, args []ref) (ref, bool)
+ "syscall/js.valueNew": (sp) => {
+ try {
+ const v = loadValue(sp + 8);
+ const args = loadSliceOfValues(sp + 16);
+ const result = Reflect.construct(v, args);
+ sp = this._inst.exports.getsp(); // see comment above
+ storeValue(sp + 40, result);
+ mem().setUint8(sp + 48, 1);
+ } catch (err) {
+ storeValue(sp + 40, err);
+ mem().setUint8(sp + 48, 0);
+ }
+ },
+
+ // func valueLength(v ref) int
+ "syscall/js.valueLength": (sp) => {
+ setInt64(sp + 16, parseInt(loadValue(sp + 8).length));
+ },
+
+ // valuePrepareString(v ref) (ref, int)
+ "syscall/js.valuePrepareString": (sp) => {
+ const str = encoder.encode(String(loadValue(sp + 8)));
+ storeValue(sp + 16, str);
+ setInt64(sp + 24, str.length);
+ },
+
+ // valueLoadString(v ref, b []byte)
+ "syscall/js.valueLoadString": (sp) => {
+ const str = loadValue(sp + 8);
+ loadSlice(sp + 16).set(str);
+ },
+
+ // func valueInstanceOf(v ref, t ref) bool
+ "syscall/js.valueInstanceOf": (sp) => {
+ mem().setUint8(sp + 24, loadValue(sp + 8) instanceof loadValue(sp + 16));
+ },
+
+ "debug": (value) => {
+ console.log(value);
+ },
+ }
+ };
+ }
+
+ async run(instance) {
+ this._inst = instance;
+ this._values = [ // TODO: garbage collection
+ NaN,
+ 0,
+ null,
+ true,
+ false,
+ global,
+ this._inst.exports.mem,
+ this,
+ ];
+ this._refs = new Map();
+ this.exited = false;
+
+ const mem = new DataView(this._inst.exports.mem.buffer)
+
+ // Pass command line arguments and environment variables to WebAssembly by writing them to the linear memory.
+ let offset = 4096;
+
+ const strPtr = (str) => {
+ let ptr = offset;
+ new Uint8Array(mem.buffer, offset, str.length + 1).set(encoder.encode(str + "\0"));
+ offset += str.length + (8 - (str.length % 8));
+ return ptr;
+ };
+
+ const argc = this.argv.length;
+
+ const argvPtrs = [];
+ this.argv.forEach((arg) => {
+ argvPtrs.push(strPtr(arg));
+ });
+
+ const keys = Object.keys(this.env).sort();
+ argvPtrs.push(keys.length);
+ keys.forEach((key) => {
+ argvPtrs.push(strPtr(`${key}=${this.env[key]}`));
+ });
+
+ const argv = offset;
+ argvPtrs.forEach((ptr) => {
+ mem.setUint32(offset, ptr, true);
+ mem.setUint32(offset + 4, 0, true);
+ offset += 8;
+ });
+
+ this._inst.exports.run(argc, argv);
+ if (this.exited) {
+ this._resolveExitPromise();
+ }
+ await this._exitPromise;
+ }
+
+ _resume() {
+ if (this.exited) {
+ throw new Error("Go program has already exited");
+ }
+ this._inst.exports.resume();
+ if (this.exited) {
+ this._resolveExitPromise();
+ }
+ }
+
+ _makeFuncWrapper(id) {
+ const go = this;
+ return function () {
+ const event = { id: id, this: this, args: arguments };
+ go._pendingEvent = event;
+ go._resume();
+ return event.result;
+ };
+ }
+ }
+
+ if (isNodeJS) {
+ if (process.argv.length < 3) {
+ process.stderr.write("usage: go_js_wasm_exec [wasm binary] [arguments]\n");
+ process.exit(1);
+ }
+
+ const go = new Go();
+ go.argv = process.argv.slice(2);
+ go.env = Object.assign({ TMPDIR: require("os").tmpdir() }, process.env);
+ go.exit = process.exit;
+ WebAssembly.instantiate(fs.readFileSync(process.argv[2]), go.importObject).then((result) => {
+ process.on("exit", (code) => { // Node.js exits if no event handler is pending
+ if (code === 0 && !go.exited) {
+ // deadlock, make Go print error and stack traces
+ go._pendingEvent = { id: 0 };
+ go._resume();
+ }
+ });
+ return go.run(result.instance);
+ }).catch((err) => {
+ throw err;
+ });
+ }
+})();
diff --git a/wasm/skycoin.go b/wasm/skycoin.go
new file mode 100644
index 0000000..41fab13
--- /dev/null
+++ b/wasm/skycoin.go
@@ -0,0 +1,172 @@
+package main
+
+import (
+ "errors"
+ "syscall/js"
+
+ "github.com/skycoin/skycoin-lite/liteclient"
+)
+
+func recoverFromPanic(response *interface{}) {
+ if err := recover(); err != nil {
+ finalResponse := make(map[string]interface{})
+
+ if r, ok := err.(error); ok {
+ finalResponse["error"] = r.Error()
+ } else if r, ok := err.(string); ok {
+ finalResponse["error"] = r
+ } else {
+ finalResponse["error"] = "Error performing cryptographic operation"
+ }
+
+ *response = finalResponse
+ }
+}
+
+func checkParams(params *[]js.Value) {
+ for _, element := range *params {
+ if element.Type() != js.TypeString {
+ panic(errors.New("Invalid argument type"))
+ }
+ }
+}
+
+// Main functions
+
+func generateAddress(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.GenerateAddress(inputs[0].String())
+
+ finalResponse := make(map[string]interface{})
+ finalResponse["address"] = functionResponse.Address
+ finalResponse["nextSeed"] = functionResponse.NextSeed
+ finalResponse["public"] = functionResponse.Public
+ finalResponse["secret"] = functionResponse.Secret
+
+ return finalResponse
+}
+
+func prepareTransaction(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.PrepareTransaction(inputs[0].String(), inputs[1].String())
+
+ return functionResponse
+}
+
+func prepareTransactionWithSignatures(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.PrepareTransactionWithSignatures(inputs[0].String(), inputs[1].String(), inputs[2].String())
+
+ return functionResponse
+}
+
+// Extra functions
+
+func verifySignature(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ liteclient.VerifySignature(inputs[0].String(), inputs[1].String(), inputs[2].String())
+
+ return
+}
+
+func chkSig(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ liteclient.ChkSig(inputs[0].String(), inputs[1].String(), inputs[2].String())
+
+ return
+}
+
+func verifySignedHash(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ liteclient.VerifySignedHash(inputs[0].String(), inputs[1].String())
+
+ return
+}
+
+func verifySeckey(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ liteclient.VerifySeckey(inputs[0].String())
+
+ return
+}
+
+func verifyPubkey(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ liteclient.VerifyPubkey(inputs[0].String())
+
+ return
+}
+
+func addressFromPubKey(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.AddressFromPubKey(inputs[0].String())
+
+ return functionResponse
+}
+
+func addressFromSecKey(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.AddressFromSecKey(inputs[0].String())
+
+ return functionResponse
+}
+
+func pubKeyFromSig(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.PubKeyFromSig(inputs[0].String(), inputs[1].String())
+
+ return functionResponse
+}
+
+func signHash(this js.Value, inputs []js.Value) (response interface{}) {
+ defer recoverFromPanic(&response)
+ checkParams(&inputs)
+
+ functionResponse := liteclient.SignHash(inputs[0].String(), inputs[1].String())
+
+ return functionResponse
+}
+
+func main() {
+ c := make(chan bool)
+ cipherNamespace := "SkycoinCipher"
+ js.Global().Set(cipherNamespace, js.FuncOf(nil))
+ js.Global().Get(cipherNamespace).Set("generateAddress", js.FuncOf(generateAddress))
+ js.Global().Get(cipherNamespace).Set("prepareTransaction", js.FuncOf(prepareTransaction))
+ js.Global().Get(cipherNamespace).Set("prepareTransactionWithSignatures", js.FuncOf(prepareTransactionWithSignatures))
+
+ cipherExtrasNamespace := "SkycoinCipherExtras"
+ js.Global().Set(cipherExtrasNamespace, js.FuncOf(nil))
+ js.Global().Get(cipherExtrasNamespace).Set("verifySignature", js.FuncOf(verifySignature))
+ js.Global().Get(cipherExtrasNamespace).Set("chkSig", js.FuncOf(chkSig))
+ js.Global().Get(cipherExtrasNamespace).Set("verifySignedHash", js.FuncOf(verifySignedHash))
+ js.Global().Get(cipherExtrasNamespace).Set("verifySeckey", js.FuncOf(verifySeckey))
+ js.Global().Get(cipherExtrasNamespace).Set("verifyPubkey", js.FuncOf(verifyPubkey))
+ js.Global().Get(cipherExtrasNamespace).Set("addressFromPubKey", js.FuncOf(addressFromPubKey))
+ js.Global().Get(cipherExtrasNamespace).Set("addressFromSecKey", js.FuncOf(addressFromSecKey))
+ js.Global().Get(cipherExtrasNamespace).Set("pubKeyFromSig", js.FuncOf(pubKeyFromSig))
+ js.Global().Get(cipherExtrasNamespace).Set("signHash", js.FuncOf(signHash))
+ <-c
+}
From e94f3963258ac81b6d3d3fa4171ee03157a422a1 Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Thu, 20 Jun 2019 10:50:21 -0400
Subject: [PATCH 2/8] Small changes related to the wasm builds
---
.travis.yml | 2 +-
Makefile | 4 ++--
README.md | 4 ++--
wasm/skycoin.go | 17 +++++++++++++++--
4 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 71497ff..2f11b04 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,7 +3,7 @@ dist: trusty
language: go
go:
- - "1.10.x"
+ - "1.12.x"
install:
- go get -t ./...
diff --git a/Makefile b/Makefile
index 02e3a1e..f494fee 100644
--- a/Makefile
+++ b/Makefile
@@ -18,10 +18,10 @@ test-js: ## Run the Go tests using JavaScript
go build -o gopherjs-tool vendor/github.com/gopherjs/gopherjs/tool.go
./gopherjs-tool test ./skycoin/ -v
-test-suite-ts: ## Run the ts version of the cipher test suite. Use a small number of test cases
+test-suite-ts: ## Run the ts version of the cipher test suite for GopherJS. Use a small number of test cases
npm run test
-test-suite-ts-extensive: ## Run the ts version of the cipher test suite. All the test cases
+test-suite-ts-extensive: ## Run the ts version of the cipher test suite for GopherJS. All the test cases
npm run test-extensive
test-suite-ts-wasm: ## Run the ts version of the cipher test suite for wasm
diff --git a/README.md b/README.md
index f08abf6..1246a36 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ and make sure `NODE_PATH` is set to the value of `npm root --global` in your env
performance issues and should not be used. Please compile to a wasm file instead.
The repository includes a TypeScript version of the cipher test suite, originally written in Go in
-the Skycoin main repository. It test the result of the GopherJS version of the library, so before
+the Skycoin main repository. It tests the GopherJS version of the library, so before
using it you must compile the GopherJS version with `make build-js` or `make build-js-min`.
Because testing the GopherJS version takes a significant amount of time to complete, the test suite
can be run with a limited number of cases with:
@@ -103,7 +103,7 @@ make test-suite-ts-extensive
#### TS cipher test suite for wasm
There is test suite for the wasm version of the library, just like there is one for the GopherJS
-version. Before using it you must compile the wasm version with `make build-wasm`. the test suite
+version. Before using it you must compile the wasm version with `make build-wasm`. The test suite
can be run with:
```sh
diff --git a/wasm/skycoin.go b/wasm/skycoin.go
index 41fab13..7599694 100644
--- a/wasm/skycoin.go
+++ b/wasm/skycoin.go
@@ -7,6 +7,10 @@ import (
"github.com/skycoin/skycoin-lite/liteclient"
)
+// recoverFromPanic captures the panics and returns an object with the error message.
+// It must be used in all the functions that can be called using the compiled wasm
+// file, as the Go code contains multiple panics that would completely stop the
+// execution of the wasm application without returning adequate errors to the JS code.
func recoverFromPanic(response *interface{}) {
if err := recover(); err != nil {
finalResponse := make(map[string]interface{})
@@ -23,6 +27,7 @@ func recoverFromPanic(response *interface{}) {
}
}
+// checkParams checks if all the params are of the type js.TypeString.
func checkParams(params *[]js.Value) {
for _, element := range *params {
if element.Type() != js.TypeString {
@@ -31,7 +36,9 @@ func checkParams(params *[]js.Value) {
}
}
-// Main functions
+// Main functions:
+// The following functions are simply wrappers to call the functions in
+// liteclient/client.go.
func generateAddress(this js.Value, inputs []js.Value) (response interface{}) {
defer recoverFromPanic(&response)
@@ -66,7 +73,9 @@ func prepareTransactionWithSignatures(this js.Value, inputs []js.Value) (respons
return functionResponse
}
-// Extra functions
+// Extra functions:
+// The following functions are simply wrappers to call the functions in
+// liteclient/extras.go.
func verifySignature(this js.Value, inputs []js.Value) (response interface{}) {
defer recoverFromPanic(&response)
@@ -150,13 +159,17 @@ func signHash(this js.Value, inputs []js.Value) (response interface{}) {
}
func main() {
+ // Create a channel for keeping the application alive
c := make(chan bool)
+
+ // Add the main functions to the "window.SkycoinCipher" object.
cipherNamespace := "SkycoinCipher"
js.Global().Set(cipherNamespace, js.FuncOf(nil))
js.Global().Get(cipherNamespace).Set("generateAddress", js.FuncOf(generateAddress))
js.Global().Get(cipherNamespace).Set("prepareTransaction", js.FuncOf(prepareTransaction))
js.Global().Get(cipherNamespace).Set("prepareTransactionWithSignatures", js.FuncOf(prepareTransactionWithSignatures))
+ // Add the extra functions to the "window.SkycoinCipherExtras" object.
cipherExtrasNamespace := "SkycoinCipherExtras"
js.Global().Set(cipherExtrasNamespace, js.FuncOf(nil))
js.Global().Get(cipherExtrasNamespace).Set("verifySignature", js.FuncOf(verifySignature))
From 394ae03ee13547d87859bbacc80fbe314422c50e Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Thu, 20 Jun 2019 12:12:49 -0400
Subject: [PATCH 3/8] Fix travis config
---
.travis.yml | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 2f11b04..41a4a11 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,16 +6,14 @@ go:
- "1.12.x"
install:
- - go get -t ./...
+ - GOOS=js GOARCH=wasm go get -t ./...
- make install-linters
- npm install
- npm install --global source-map-support
script:
- make check
- - make build-js-min
- make build-wasm
- - make test-suite-ts
- make test-suite-ts-wasm
notifications:
From 7ea64acc685131640250c93d127a6a46cd824718 Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Sat, 22 Jun 2019 20:13:46 -0400
Subject: [PATCH 4/8] Include the secp256k1-go tests in the wasm test suite
---
Gopkg.toml | 1 -
Makefile | 4 +
karma-wasm.conf.js | 3 +
tests/cipher-wasm-internal.spec.ts | 66 +
.../fsnotify/fsnotify/example_test.go | 42 +
.../fsnotify/fsnotify/fsnotify_test.go | 70 +
.../fsnotify/fsnotify/inotify_poller_test.go | 229 +++
.../fsnotify/fsnotify/inotify_test.go | 449 +++++
.../fsnotify/integration_darwin_test.go | 147 ++
.../fsnotify/fsnotify/integration_test.go | 1237 ++++++++++++
.../gopherjs/gopherjs/build/build_test.go | 199 ++
.../gotool/internal/load/match_test.go | 167 ++
.../github.com/kisielk/gotool/match18_test.go | 136 ++
.../neelance/astrewrite/simplify_test.go | 190 ++
.../neelance/sourcemap/sourcemap_test.go | 60 +
.../shurcooL/httpfs/filter/filter_test.go | 117 ++
.../shurcooL/httpfs/vfsutil/walk_test.go | 93 +
.../skycoin/src/cipher/address_test.go | 281 +++
.../skycoin/skycoin/src/cipher/crypto_test.go | 344 ++++
.../src/cipher/encoder/encoder_test.go | 894 +++++++++
.../skycoin/skycoin/src/cipher/hash_test.go | 271 +++
.../src/cipher/secp256k1-go/secp256_test.go | 665 +++++++
.../secp256k1-go/secp256k1-go2/ec_test.go | 225 +++
.../secp256k1-go/secp256k1-go2/field_test.go | 36 +
.../secp256k1-go/secp256k1-go2/sig_test.go | 150 ++
.../secp256k1-go/secp256k1-go2/xyz_test.go | 49 +
.../skycoin/skycoin/src/coin/block_test.go | 191 ++
.../skycoin/skycoin/src/coin/coin_test.go | 299 +++
.../skycoin/skycoin/src/coin/math_test.go | 116 ++
.../skycoin/skycoin/src/coin/outputs_test.go | 577 ++++++
.../skycoin/src/coin/transactions_test.go | 953 +++++++++
vendor/github.com/spf13/cobra/args_test.go | 241 +++
.../spf13/cobra/bash_completions_test.go | 217 +++
.../spf13/cobra/cobra/cmd/license_agpl.go | 683 +++++++
.../spf13/cobra/cobra/cmd/license_apache_2.go | 238 +++
.../cobra/cobra/cmd/license_bsd_clause_2.go | 71 +
.../cobra/cobra/cmd/license_bsd_clause_3.go | 78 +
.../spf13/cobra/cobra/cmd/license_gpl_2.go | 376 ++++
.../spf13/cobra/cobra/cmd/license_gpl_3.go | 711 +++++++
.../spf13/cobra/cobra/cmd/license_lgpl.go | 186 ++
.../spf13/cobra/cobra/cmd/license_mit.go | 63 +
.../spf13/cobra/cobra/cmd/licenses.go | 118 ++
vendor/github.com/spf13/cobra/cobra_test.go | 22 +
vendor/github.com/spf13/cobra/command_test.go | 1733 +++++++++++++++++
.../spf13/cobra/zsh_completions_test.go | 89 +
.../github.com/spf13/pflag/bool_slice_test.go | 215 ++
vendor/github.com/spf13/pflag/bool_test.go | 179 ++
vendor/github.com/spf13/pflag/bytes_test.go | 72 +
vendor/github.com/spf13/pflag/count_test.go | 56 +
.../spf13/pflag/duration_slice_test.go | 165 ++
vendor/github.com/spf13/pflag/example_test.go | 36 +
vendor/github.com/spf13/pflag/export_test.go | 29 +
vendor/github.com/spf13/pflag/flag_test.go | 1259 ++++++++++++
.../github.com/spf13/pflag/golangflag_test.go | 47 +
.../github.com/spf13/pflag/int_slice_test.go | 165 ++
.../github.com/spf13/pflag/ip_slice_test.go | 222 +++
vendor/github.com/spf13/pflag/ip_test.go | 63 +
vendor/github.com/spf13/pflag/ipnet_test.go | 70 +
.../github.com/spf13/pflag/printusage_test.go | 74 +
.../spf13/pflag/string_array_test.go | 233 +++
.../spf13/pflag/string_slice_test.go | 253 +++
.../github.com/spf13/pflag/uint_slice_test.go | 161 ++
.../x/crypto/ssh/terminal/terminal_test.go | 358 ++++
vendor/golang.org/x/sys/unix/creds_test.go | 134 ++
.../golang.org/x/sys/unix/dev_linux_test.go | 56 +
vendor/golang.org/x/sys/unix/example_test.go | 19 +
vendor/golang.org/x/sys/unix/export_test.go | 9 +
.../golang.org/x/sys/unix/mmap_unix_test.go | 35 +
vendor/golang.org/x/sys/unix/openbsd_test.go | 113 ++
.../golang.org/x/sys/unix/syscall_bsd_test.go | 93 +
.../x/sys/unix/syscall_darwin_test.go | 19 +
.../x/sys/unix/syscall_freebsd_test.go | 312 +++
.../x/sys/unix/syscall_linux_test.go | 386 ++++
.../x/sys/unix/syscall_solaris_test.go | 55 +
vendor/golang.org/x/sys/unix/syscall_test.go | 60 +
.../x/sys/unix/syscall_unix_test.go | 639 ++++++
.../golang.org/x/sys/unix/timestruct_test.go | 54 +
vendor/golang.org/x/sys/unix/xattr_test.go | 119 ++
.../golang.org/x/sys/windows/syscall_test.go | 53 +
.../x/sys/windows/syscall_windows_test.go | 113 ++
.../x/tools/go/buildutil/allpackages_test.go | 83 +
.../x/tools/go/buildutil/overlay_test.go | 70 +
.../x/tools/go/buildutil/tags_test.go | 28 +
.../x/tools/go/buildutil/util_test.go | 85 +
.../x/tools/go/buildutil/util_windows_test.go | 48 +
.../x/tools/go/gcexportdata/example_test.go | 122 ++
.../go/gcexportdata/gcexportdata_test.go | 41 +
.../go/internal/gcimporter/bexport19_test.go | 96 +
.../go/internal/gcimporter/bexport_test.go | 335 ++++
.../go/internal/gcimporter/gcimporter_test.go | 521 +++++
.../x/tools/go/types/typeutil/example_test.go | 67 +
.../x/tools/go/types/typeutil/imports_test.go | 80 +
.../x/tools/go/types/typeutil/map_test.go | 174 ++
.../x/tools/go/types/typeutil/ui_test.go | 61 +
94 files changed, 20823 insertions(+), 1 deletion(-)
create mode 100644 tests/cipher-wasm-internal.spec.ts
create mode 100644 vendor/github.com/fsnotify/fsnotify/example_test.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_test.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_test.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/integration_test.go
create mode 100644 vendor/github.com/gopherjs/gopherjs/build/build_test.go
create mode 100644 vendor/github.com/kisielk/gotool/internal/load/match_test.go
create mode 100644 vendor/github.com/kisielk/gotool/match18_test.go
create mode 100644 vendor/github.com/neelance/astrewrite/simplify_test.go
create mode 100644 vendor/github.com/neelance/sourcemap/sourcemap_test.go
create mode 100644 vendor/github.com/shurcooL/httpfs/filter/filter_test.go
create mode 100644 vendor/github.com/shurcooL/httpfs/vfsutil/walk_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/address_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/block_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/coin_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/math_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/outputs_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go
create mode 100644 vendor/github.com/spf13/cobra/args_test.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions_test.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
create mode 100644 vendor/github.com/spf13/cobra/cobra_test.go
create mode 100644 vendor/github.com/spf13/cobra/command_test.go
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions_test.go
create mode 100644 vendor/github.com/spf13/pflag/bool_slice_test.go
create mode 100644 vendor/github.com/spf13/pflag/bool_test.go
create mode 100644 vendor/github.com/spf13/pflag/bytes_test.go
create mode 100644 vendor/github.com/spf13/pflag/count_test.go
create mode 100644 vendor/github.com/spf13/pflag/duration_slice_test.go
create mode 100644 vendor/github.com/spf13/pflag/example_test.go
create mode 100644 vendor/github.com/spf13/pflag/export_test.go
create mode 100644 vendor/github.com/spf13/pflag/flag_test.go
create mode 100644 vendor/github.com/spf13/pflag/golangflag_test.go
create mode 100644 vendor/github.com/spf13/pflag/int_slice_test.go
create mode 100644 vendor/github.com/spf13/pflag/ip_slice_test.go
create mode 100644 vendor/github.com/spf13/pflag/ip_test.go
create mode 100644 vendor/github.com/spf13/pflag/ipnet_test.go
create mode 100644 vendor/github.com/spf13/pflag/printusage_test.go
create mode 100644 vendor/github.com/spf13/pflag/string_array_test.go
create mode 100644 vendor/github.com/spf13/pflag/string_slice_test.go
create mode 100644 vendor/github.com/spf13/pflag/uint_slice_test.go
create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
create mode 100644 vendor/golang.org/x/sys/unix/creds_test.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_linux_test.go
create mode 100644 vendor/golang.org/x/sys/unix/example_test.go
create mode 100644 vendor/golang.org/x/sys/unix/export_test.go
create mode 100644 vendor/golang.org/x/sys/unix/mmap_unix_test.go
create mode 100644 vendor/golang.org/x/sys/unix/openbsd_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_test.go
create mode 100644 vendor/golang.org/x/sys/unix/timestruct_test.go
create mode 100644 vendor/golang.org/x/sys/unix/xattr_test.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall_test.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows_test.go
create mode 100644 vendor/golang.org/x/tools/go/buildutil/allpackages_test.go
create mode 100644 vendor/golang.org/x/tools/go/buildutil/overlay_test.go
create mode 100644 vendor/golang.org/x/tools/go/buildutil/tags_test.go
create mode 100644 vendor/golang.org/x/tools/go/buildutil/util_test.go
create mode 100644 vendor/golang.org/x/tools/go/buildutil/util_windows_test.go
create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/example_test.go
create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go
create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/example_test.go
create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports_test.go
create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map_test.go
create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui_test.go
diff --git a/Gopkg.toml b/Gopkg.toml
index eadd0c3..8454e7c 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -31,5 +31,4 @@ required = ["github.com/gopherjs/gopherjs"]
name = "github.com/skycoin/skycoin"
[prune]
- go-tests = true
unused-packages = true
diff --git a/Makefile b/Makefile
index f494fee..085c45b 100644
--- a/Makefile
+++ b/Makefile
@@ -25,7 +25,11 @@ test-suite-ts-extensive: ## Run the ts version of the cipher test suite for Goph
npm run test-extensive
test-suite-ts-wasm: ## Run the ts version of the cipher test suite for wasm
+ cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go && GOOS=js GOARCH=wasm go test -c -o test.wasm
+ cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 && GOOS=js GOARCH=wasm go test -c -o test.wasm
npm run test-wasm
+ cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go && rm test.wasm
+ cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 && rm test.wasm
test:
go test ./... -timeout=10m -cover
diff --git a/karma-wasm.conf.js b/karma-wasm.conf.js
index 3e8b3bc..0820dcd 100644
--- a/karma-wasm.conf.js
+++ b/karma-wasm.conf.js
@@ -15,8 +15,11 @@ module.exports = function (config) {
],
files: [
'tests/cipher-wasm.spec.ts',
+ 'tests/cipher-wasm-internal.spec.ts',
{ pattern: 'tests/test-fixtures/*.golden', included: false },
{ pattern: 'skycoin-lite.wasm', included: false },
+ { pattern: 'vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm', included: false },
+ { pattern: 'vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm', included: false },
{ pattern: 'tests/utils.ts', included: true },
{ pattern: 'tests/wasm_exec.js', included: true },
],
diff --git a/tests/cipher-wasm-internal.spec.ts b/tests/cipher-wasm-internal.spec.ts
new file mode 100644
index 0000000..3b97e87
--- /dev/null
+++ b/tests/cipher-wasm-internal.spec.ts
@@ -0,0 +1,66 @@
+// Runs the tests from src/cipher/secp256k1-go/ and src/cipher/secp256k1-go/secp256k1-go2/
+// after being compiled to wasm
+
+declare var Go: any;
+
+describe('Tnternal test ', () => {
+
+ let warningShown = false;
+
+ const tmp = console.warn;
+ console.warn = (message, ...optionalParams) => {
+ warningShown = true;
+ tmp(message, optionalParams);
+ };
+
+ let originalTimeout;
+
+ beforeEach(function() {
+ originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
+ jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000;
+ });
+
+ afterEach(function() {
+ jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
+ });
+
+ it('test from src/cipher/sec256k1-go/ should pass', done => {
+ warningShown = false;
+ fetch('base/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm').then(response => {
+ response.arrayBuffer().then(ab => {
+ const go = new Go();
+ window['WebAssembly'].instantiate(ab, go.importObject).then(result => {
+ go.run(result.instance).then(result => {
+ if (warningShown == false) {
+ done();
+ } else {
+ fail('Test failed.');
+ }
+ }, err => {
+ fail('Test failed.');
+ });
+ });
+ });
+ });
+ });
+
+ it('test from src/cipher/sec256k1-go/secp256k1-go2/ should pass', done => {
+ warningShown = false;
+ fetch('base/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm').then(response => {
+ response.arrayBuffer().then(ab => {
+ const go = new Go();
+ window['WebAssembly'].instantiate(ab, go.importObject).then(result => {
+ go.run(result.instance).then(result => {
+ if (warningShown == false) {
+ done();
+ } else {
+ fail('Test failed.');
+ }
+ }, err => {
+ fail('Test failed.');
+ });
+ });
+ });
+ });
+ });
+});
diff --git a/vendor/github.com/fsnotify/fsnotify/example_test.go b/vendor/github.com/fsnotify/fsnotify/example_test.go
new file mode 100644
index 0000000..700502c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/example_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify_test
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err := <-watcher.Errors:
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go
new file mode 100644
index 0000000..f9771d9
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go
@@ -0,0 +1,70 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify
+
+import (
+ "os"
+ "testing"
+ "time"
+)
+
+func TestEventStringWithValue(t *testing.T) {
+ for opMask, expectedString := range map[Op]string{
+ Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
+ Rename: `"/usr/someFile": RENAME`,
+ Remove: `"/usr/someFile": REMOVE`,
+ Write | Chmod: `"/usr/someFile": WRITE|CHMOD`,
+ } {
+ event := Event{Name: "/usr/someFile", Op: opMask}
+ if event.String() != expectedString {
+ t.Fatalf("Expected %s, got: %v", expectedString, event.String())
+ }
+
+ }
+}
+
+func TestEventOpStringWithValue(t *testing.T) {
+ expectedOpString := "WRITE|CHMOD"
+ event := Event{Name: "someFile", Op: Write | Chmod}
+ if event.Op.String() != expectedOpString {
+ t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+ }
+}
+
+func TestEventOpStringWithNoValue(t *testing.T) {
+ expectedOpString := ""
+ event := Event{Name: "testFile", Op: 0}
+ if event.Op.String() != expectedOpString {
+ t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+ }
+}
+
+// TestWatcherClose tests that the goroutine started by creating the watcher can be
+// signalled to return at any time, even if there is no goroutine listening on the events
+// or errors channels.
+func TestWatcherClose(t *testing.T) {
+ t.Parallel()
+
+ name := tempMkFile(t, "")
+ w := newWatcher(t)
+ err := w.Add(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = os.Remove(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Allow the watcher to receive the event.
+ time.Sleep(time.Millisecond * 100)
+
+ err = w.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go
new file mode 100644
index 0000000..26623ef
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller_test.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type testFd [2]int
+
+func makeTestFd(t *testing.T) testFd {
+ var tfd testFd
+ errno := unix.Pipe(tfd[:])
+ if errno != nil {
+ t.Fatalf("Failed to create pipe: %v", errno)
+ }
+ return tfd
+}
+
+func (tfd testFd) fd() int {
+ return tfd[0]
+}
+
+func (tfd testFd) closeWrite(t *testing.T) {
+ errno := unix.Close(tfd[1])
+ if errno != nil {
+ t.Fatalf("Failed to close write end of pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) put(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Write(tfd[1], buf)
+ if errno != nil {
+ t.Fatalf("Failed to write to pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) get(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Read(tfd[0], buf)
+ if errno != nil {
+ t.Fatalf("Failed to read from pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) close() {
+ unix.Close(tfd[1])
+ unix.Close(tfd[0])
+}
+
+func makePoller(t *testing.T) (testFd, *fdPoller) {
+ tfd := makeTestFd(t)
+ poller, err := newFdPoller(tfd.fd())
+ if err != nil {
+ t.Fatalf("Failed to create poller: %v", err)
+ }
+ return tfd, poller
+}
+
+func TestPollerWithBadFd(t *testing.T) {
+ _, err := newFdPoller(-1)
+ if err != unix.EBADF {
+ t.Fatalf("Expected EBADF, got: %v", err)
+ }
+}
+
+func TestPollerWithData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+ tfd.get(t)
+}
+
+func TestPollerWithWakeup(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerWithClose(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.closeWrite(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+}
+
+func TestPollerWithWakeupAndData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+
+ // both data and wakeup
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ // data is still in the buffer, wakeup is cleared
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ tfd.get(t)
+ // data is gone, only wakeup now
+ err = poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerConcurrent(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ oks := make(chan bool)
+ live := make(chan bool)
+ defer close(live)
+ go func() {
+ defer close(oks)
+ for {
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ oks <- ok
+ if !<-live {
+ return
+ }
+ }
+ }()
+
+ // Try a write
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.put(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+ live <- true
+
+ // Try a wakeup
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ if <-oks {
+ t.Fatalf("expected false")
+ }
+ live <- true
+
+ // Try a close
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.closeWrite(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_test.go
new file mode 100644
index 0000000..54f3f00
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_test.go
@@ -0,0 +1,449 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestInotifyCloseRightAway(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Close immediately; it won't even reach the first unix.Read.
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLater(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+ w.Add(testDir)
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseAfterRead(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add .")
+ }
+
+ // Generate an event.
+ os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
+
+ // Wait for readEvents to read the event, then close the watcher.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func isWatcherReallyClosed(t *testing.T, w *Watcher) {
+ select {
+ case err, ok := <-w.Errors:
+ if ok {
+ t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
+ }
+ default:
+ t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
+ }
+
+ select {
+ case _, ok := <-w.Events:
+ if ok {
+ t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
+ }
+ default:
+ t.Fatalf("w.Events would have blocked; readEvents is still alive!")
+ }
+}
+
+func TestInotifyCloseCreate(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+ h, err := os.Create(filepath.Join(testDir, "testfile"))
+ if err != nil {
+ t.Fatalf("Failed to create file in testdir: %v", err)
+ }
+ h.Close()
+ select {
+ case _ = <-w.Events:
+ case err := <-w.Errors:
+ t.Fatalf("Error from watcher: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("Took too long to wait for event")
+ }
+
+ // At this point, we've received one event, so the goroutine is ready.
+ // It's also blocking on unix.Read.
+ // Now we try to swap the file descriptor under its nose.
+ w.Close()
+ w, err = NewWatcher()
+ defer w.Close()
+ if err != nil {
+ t.Fatalf("Failed to create second watcher: %v", err)
+ }
+
+ <-time.After(50 * time.Millisecond)
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Error adding testDir again: %v", err)
+ }
+}
+
+// This test verifies the watcher can keep up with file creations/deletions
+// when under load.
+func TestInotifyStress(t *testing.T) {
+ maxNumToCreate := 1000
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFilePrefix := filepath.Join(testDir, "testfile")
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+
+ doneChan := make(chan struct{})
+ // The buffer ensures that the file generation goroutine is never blocked.
+ errChan := make(chan error, 2*maxNumToCreate)
+
+ go func() {
+ for i := 0; i < maxNumToCreate; i++ {
+ testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+
+ // If we delete a newly created file too quickly, inotify will skip the
+ // create event and only send the delete event.
+ time.Sleep(100 * time.Millisecond)
+
+ for i := 0; i < maxNumToCreate; i++ {
+ testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+ err = os.Remove(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Remove failed: %v", err)
+ }
+ }
+
+ close(doneChan)
+ }()
+
+ creates := 0
+ removes := 0
+
+ finished := false
+ after := time.After(10 * time.Second)
+ for !finished {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case <-doneChan:
+ finished = true
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testFilePrefix) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ }
+ }
+
+ // Drain remaining events from channels
+ count := 0
+ for count < 10 {
+ select {
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testFilePrefix) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ count = 0
+ default:
+ count++
+ // Give the watcher chances to fill the channels.
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ if creates-removes > 1 || creates-removes < -1 {
+ t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
+ }
+ if creates < 50 {
+ t.Fatalf("Expected at least 50 creates, got %d", creates)
+ }
+}
+
+func TestInotifyRemoveTwice(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err != nil {
+ t.Fatalf("wanted successful remove but got: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err == nil {
+ t.Fatalf("no error on removing invalid file")
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
+
+func TestInotifyInnerMapLength(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+ go func() {
+ for err := range w.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Failed to remove testFile: %v", err)
+ }
+ _ = <-w.Events // consume Remove event
+ <-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
+
+func TestInotifyOverflow(t *testing.T) {
+ // We need to generate many more events than the
+ // fs.inotify.max_queued_events sysctl setting.
+ // We use multiple goroutines (one per directory)
+ // to speed up file creation.
+ numDirs := 128
+ numFiles := 1024
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ err := os.Mkdir(testSubdir, 0777)
+ if err != nil {
+ t.Fatalf("Cannot create subdir: %v", err)
+ }
+
+ err = w.Add(testSubdir)
+ if err != nil {
+ t.Fatalf("Failed to add subdir: %v", err)
+ }
+ }
+
+ errChan := make(chan error, numDirs*numFiles)
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ go func() {
+ for fn := 0; fn < numFiles; fn++ {
+ testFile := fmt.Sprintf("%s/%d", testSubdir, fn)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+ }()
+ }
+
+ creates := 0
+ overflows := 0
+
+ after := time.After(10 * time.Second)
+ for overflows == 0 && creates < numDirs*numFiles {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ if err == ErrEventOverflow {
+ overflows++
+ } else {
+ t.Fatalf("Got an error from watcher: %v", err)
+ }
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testDir) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ }
+ }
+
+ if creates == numDirs*numFiles {
+ t.Fatalf("Could not trigger overflow")
+ }
+
+ if overflows == 0 {
+ t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
+ numDirs*numFiles, creates)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go
new file mode 100644
index 0000000..cd6adc2
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go
@@ -0,0 +1,147 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fsnotify
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS.
+//
+// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument.
+//
+// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
+// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
+func testExchangedataForWatcher(t *testing.T, watchDir bool) {
+ // Create directory to watch
+ testDir1 := tempMkdir(t)
+
+ // For the intermediate file
+ testDir2 := tempMkdir(t)
+
+ defer os.RemoveAll(testDir1)
+ defer os.RemoveAll(testDir2)
+
+ resolvedFilename := "TestFsnotifyEvents.file"
+
+ // TextMate does:
+ //
+ // 1. exchangedata (intermediate, resolved)
+ // 2. unlink intermediate
+ //
+ // Let's try to simulate that:
+ resolved := filepath.Join(testDir1, resolvedFilename)
+ intermediate := filepath.Join(testDir2, resolvedFilename+"~")
+
+ // Make sure we create the file before we start watching
+ createAndSyncFile(t, resolved)
+
+ watcher := newWatcher(t)
+
+ // Test both variants in isolation
+ if watchDir {
+ addWatch(t, watcher, testDir1)
+ } else {
+ addWatch(t, watcher, resolved)
+ }
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var removeReceived counter
+ var createReceived counter
+
+ done := make(chan bool)
+
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(resolved) {
+ if event.Op&Remove == Remove {
+ removeReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ }
+ t.Logf("event received: %s", event)
+ }
+ done <- true
+ }()
+
+ // Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
+ for i := 1; i <= 3; i++ {
+ // The intermediate file is created in a folder outside the watcher
+ createAndSyncFile(t, intermediate)
+
+ // 1. Swap
+ if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
+ t.Fatalf("[%d] exchangedata failed: %s", i, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ // 2. Delete the intermediate file
+ err := os.Remove(intermediate)
+
+ if err != nil {
+ t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+	// The events will be (CHMOD + REMOVE + CREATE) X 3. Let's focus on the last two:
+ if removeReceived.value() < 3 {
+ t.Fatal("fsnotify remove events have not been received after 500 ms")
+ }
+
+ if createReceived.value() < 3 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
+func TestExchangedataInWatchedDir(t *testing.T) {
+ testExchangedataForWatcher(t, true)
+}
+
+// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
+func TestExchangedataInWatchedFile(t *testing.T) {
+ testExchangedataForWatcher(t, false)
+}
+
+func createAndSyncFile(t *testing.T, filepath string) {
+ f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating %s failed: %s", filepath, err)
+ }
+ f1.Sync()
+ f1.Close()
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/integration_test.go b/vendor/github.com/fsnotify/fsnotify/integration_test.go
new file mode 100644
index 0000000..8b7e9d3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/integration_test.go
@@ -0,0 +1,1237 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+// An atomic counter
+type counter struct {
+ val int32
+}
+
+func (c *counter) increment() {
+ atomic.AddInt32(&c.val, 1)
+}
+
+func (c *counter) value() int32 {
+ return atomic.LoadInt32(&c.val)
+}
+
+func (c *counter) reset() {
+ atomic.StoreInt32(&c.val, 0)
+}
+
+// tempMkdir makes a temporary directory
+func tempMkdir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test directory: %s", err)
+ }
+ return dir
+}
+
+// tempMkFile makes a temporary file.
+func tempMkFile(t *testing.T, dir string) string {
+ f, err := ioutil.TempFile(dir, "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test file: %v", err)
+ }
+ defer f.Close()
+ return f.Name()
+}
+
+// newWatcher initializes an fsnotify Watcher instance.
+func newWatcher(t *testing.T) *Watcher {
+ watcher, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("NewWatcher() failed: %s", err)
+ }
+ return watcher
+}
+
+// addWatch adds a watch for a directory
+func addWatch(t *testing.T, watcher *Watcher, dir string) {
+ if err := watcher.Add(dir); err != nil {
+ t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
+ }
+}
+
+func TestFsnotifyMultipleOperations(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory that's not watched
+ testDirToMoveFiles := tempMkdir(t)
+ defer os.RemoveAll(testDirToMoveFiles)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+ testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived, renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // Modify the file outside of the watched dir
+ f, err = os.Open(testFileRenamed)
+ if err != nil {
+ t.Fatalf("open test renamed file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file that was moved
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ rReceived := renameReceived.value()
+ if dReceived+rReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyMultipleCreates(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived < 3 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDirOnly(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ // This should NOT add any events to the fsnotify event queue
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+ os.Remove(testFileAlreadyExists)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 1 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDeleteWatchedDir(t *testing.T) {
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFileAlreadyExists)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var deleteReceived counter
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ }()
+
+ os.RemoveAll(testDir)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ dReceived := deleteReceived.value()
+ if dReceived < 2 {
+ t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
+ }
+}
+
+func TestFsnotifySubDir(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
+ testSubDir := filepath.Join(testDir, "sub")
+ testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
+ t.Logf("event received: %s", event)
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ // Create sub-directory
+ if err := os.Mkdir(testSubDir, 0777); err != nil {
+ t.Fatalf("failed to create test sub-directory: %s", err)
+ }
+
+ // Create a file
+ var f *os.File
+ f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ // Create a file (Should not see this! we are not watching subdir)
+ var fs *os.File
+ fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fs.Sync()
+ fs.Close()
+
+ time.Sleep(200 * time.Millisecond)
+
+ // Make sure receive deletes for both file and sub-directory
+ os.RemoveAll(testSubDir)
+ os.Remove(testFile1)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+// TestFsnotifyRename checks that renaming a watched file produces at least
+// one Rename event within 500 ms, and that closing the watcher closes the
+// Events channel.
+func TestFsnotifyRename(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	// NOTE(review): t.Fatalf from a goroutine other than the test's own is
+	// unreliable per the testing package docs; t.Errorf would be safer.
+	go func() {
+		for err := range watcher.Errors {
+			t.Fatalf("error received: %s", err)
+		}
+	}()
+
+	testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
+	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var renameReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+				if event.Op&Rename == Rename {
+					renameReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		// Signal that the Events channel was closed.
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	// Add a watch for testFile
+	addWatch(t, watcher, testFile)
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	if renameReceived.value() == 0 {
+		t.Fatal("fsnotify rename events have not been received after 500 ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+
+	os.Remove(testFileRenamed)
+}
+
+// TestFsnotifyRenameToCreate checks that moving a file from an unwatched
+// directory into a watched one is reported as a Create event in the watched
+// directory.
+func TestFsnotifyRenameToCreate(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create directory to get file
+	testDirFrom := tempMkdir(t)
+	defer os.RemoveAll(testDirFrom)
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Fatalf("error received: %s", err)
+		}
+	}()
+
+	// Source lives in the unwatched directory; destination in the watched one.
+	testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var createReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+	f.Close()
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	if createReceived.value() == 0 {
+		t.Fatal("fsnotify create events have not been received after 500 ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+
+	os.Remove(testFileRenamed)
+}
+
+// TestFsnotifyRenameToOverwrite checks that renaming a file over an existing
+// file inside a watched directory produces an event for the overwritten name.
+// Skipped on plan9/windows, where os.Rename over an existing file does not
+// generate an event.
+func TestFsnotifyRenameToOverwrite(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create directory to get file
+	testDirFrom := tempMkdir(t)
+	defer os.RemoveAll(testDirFrom)
+
+	testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+	// Create a file
+	// The destination file must exist before the watch so the rename overwrites it.
+	var fr *os.File
+	fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	fr.Sync()
+	fr.Close()
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Fatalf("error received: %s", err)
+		}
+	}()
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var eventReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testFileRenamed) {
+				eventReceived.increment()
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+	f.Close()
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	if eventReceived.value() == 0 {
+		t.Fatal("fsnotify events have not been received after 500 ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+
+	os.Remove(testFileRenamed)
+}
+
+// TestRemovalOfWatch checks that after Watcher.Remove, changes to files inside
+// the previously watched directory no longer produce events.
+func TestRemovalOfWatch(t *testing.T) {
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create a file before watching directory
+	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+	{
+		var f *os.File
+		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			t.Fatalf("creating test file failed: %s", err)
+		}
+		f.Sync()
+		f.Close()
+	}
+
+	watcher := newWatcher(t)
+	defer watcher.Close()
+
+	addWatch(t, watcher, testDir)
+	if err := watcher.Remove(testDir); err != nil {
+		t.Fatalf("Could not remove the watch: %v\n", err)
+	}
+
+	// Fail if any event arrives within 500 ms of the removal.
+	// NOTE(review): t.Fatalf from a non-test goroutine does not stop the test
+	// (testing package docs); t.Errorf would be safer here.
+	go func() {
+		select {
+		case ev := <-watcher.Events:
+			t.Fatalf("We received event: %v\n", ev)
+		case <-time.After(500 * time.Millisecond):
+			t.Log("No event received, as expected.")
+		}
+	}()
+
+	time.Sleep(200 * time.Millisecond)
+	// Modify the file outside of the watched dir
+	// NOTE(review): os.Open opens read-only, so this WriteString cannot
+	// succeed (its error is ignored); the chmod below is what would trigger
+	// an event if the watch were still active.
+	f, err := os.Open(testFileAlreadyExists)
+	if err != nil {
+		t.Fatalf("Open test file failed: %s", err)
+	}
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+	if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+	// Give the event goroutine time to observe (and report) any stray events.
+	time.Sleep(400 * time.Millisecond)
+}
+
+// TestFsnotifyAttrib checks that chmod on a watched file is reported as a
+// Chmod event, that content writes are reported as Write, and that the two
+// kinds are not conflated. Skipped on Windows (no attribute events there).
+func TestFsnotifyAttrib(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("attributes don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Fatalf("error received: %s", err)
+		}
+	}()
+
+	testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	// The modifyReceived counter counts IsModify events that are not IsAttrib,
+	// and the attribReceived counts IsAttrib events (which are also IsModify as
+	// a consequence).
+	var modifyReceived counter
+	var attribReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+				if event.Op&Write == Write {
+					modifyReceived.increment()
+				}
+				if event.Op&Chmod == Chmod {
+					attribReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	// Add a watch for testFile
+	addWatch(t, watcher, testFile)
+
+	if err := os.Chmod(testFile, 0700); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	// Creating/writing a file changes also the mtime, so IsAttrib should be set to true here
+	time.Sleep(500 * time.Millisecond)
+	if modifyReceived.value() != 0 {
+		t.Fatal("received an unexpected modify event when creating a test file")
+	}
+	if attribReceived.value() == 0 {
+		t.Fatal("fsnotify attribute events have not received after 500 ms")
+	}
+
+	// Modifying the contents of the file does not set the attrib flag (although eg. the mtime
+	// might have been modified).
+	modifyReceived.reset()
+	attribReceived.reset()
+
+	f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+	if err != nil {
+		t.Fatalf("reopening test file failed: %s", err)
+	}
+
+	f.WriteString("more data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(500 * time.Millisecond)
+
+	if modifyReceived.value() != 1 {
+		t.Fatal("didn't receive a modify event after changing test file contents")
+	}
+
+	if attribReceived.value() != 0 {
+		t.Fatal("did receive an unexpected attrib event after changing test file contents")
+	}
+
+	modifyReceived.reset()
+	attribReceived.reset()
+
+	// Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+	// of the file are not changed though)
+	if err := os.Chmod(testFile, 0600); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+
+	time.Sleep(500 * time.Millisecond)
+
+	if attribReceived.value() != 1 {
+		t.Fatal("didn't receive an attribute change after 500ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(1e9): // 1 second, expressed in nanoseconds
+		t.Fatal("event stream was not closed after 1 second")
+	}
+
+	os.Remove(testFile)
+}
+
+// TestFsnotifyClose checks that a second, concurrent Close returns promptly
+// (within 50 ms) and that Add fails once the watcher is closed.
+func TestFsnotifyClose(t *testing.T) {
+	watcher := newWatcher(t)
+	watcher.Close()
+
+	// Run the second Close on another goroutine and flag completion atomically.
+	var done int32
+	go func() {
+		watcher.Close()
+		atomic.StoreInt32(&done, 1)
+	}()
+
+	time.Sleep(50e6) // 50 ms
+	if atomic.LoadInt32(&done) == 0 {
+		t.Fatal("double Close() test failed: second Close() call didn't return")
+	}
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Adding a watch after Close must report an error.
+	if err := watcher.Add(testDir); err == nil {
+		t.Fatal("expected error on Watch() after Close(), got nil")
+	}
+}
+
+// TestFsnotifyFakeSymlink checks that creating a symlink to a nonexistent
+// target inside a watched directory yields exactly one Create event (for the
+// link itself), no errors, and no other events.
+func TestFsnotifyFakeSymlink(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	var errorsReceived counter
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for errors := range watcher.Errors {
+			t.Logf("Received error: %s", errors)
+			errorsReceived.increment()
+		}
+	}()
+
+	// Count the CREATE events received
+	var createEventsReceived, otherEventsReceived counter
+	go func() {
+		for ev := range watcher.Events {
+			t.Logf("event received: %s", ev)
+			if ev.Op&Create == Create {
+				createEventsReceived.increment()
+			} else {
+				otherEventsReceived.increment()
+			}
+		}
+	}()
+
+	addWatch(t, watcher, testDir)
+
+	// The link target ("zzz") does not exist, making the symlink broken.
+	if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
+		t.Fatalf("Failed to create bogus symlink: %s", err)
+	}
+	t.Logf("Created bogus symlink")
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+
+	// Should not be error, just no events for broken links (watching nothing)
+	if errorsReceived.value() > 0 {
+		t.Fatal("fsnotify errors have been received.")
+	}
+	if otherEventsReceived.value() > 0 {
+		t.Fatal("fsnotify other events received on the broken link")
+	}
+
+	// Except for 1 create event (for the link itself)
+	if createEventsReceived.value() == 0 {
+		t.Fatal("fsnotify create events were not received after 500 ms")
+	}
+	if createEventsReceived.value() > 1 {
+		t.Fatal("fsnotify more create events received than expected")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+}
+
+// TestCyclicSymlink checks that a watch on a directory containing a symlink
+// to "." (a cycle) still reports a Create event when the link is removed and
+// replaced with a regular file.
+func TestCyclicSymlink(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// NOTE(review): path.Join is used here while the rest of the file uses
+	// filepath.Join; equivalent on the non-Windows systems this test runs on.
+	link := path.Join(testDir, "link")
+	if err := os.Symlink(".", link); err != nil {
+		t.Fatalf("could not make symlink: %v", err)
+	}
+	addWatch(t, watcher, testDir)
+
+	var createEventsReceived counter
+	go func() {
+		for ev := range watcher.Events {
+			if ev.Op&Create == Create {
+				createEventsReceived.increment()
+			}
+		}
+	}()
+
+	if err := os.Remove(link); err != nil {
+		t.Fatalf("Error removing link: %v", err)
+	}
+
+	// It would be nice to be able to expect a delete event here, but kqueue has
+	// no way for us to get events on symlinks themselves, because opening them
+	// opens an fd to the file to which they point.
+
+	if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil {
+		t.Fatalf("could not make symlink: %v", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+
+	if got := createEventsReceived.value(); got == 0 {
+		t.Errorf("want at least 1 create event got %v", got)
+	}
+
+	watcher.Close()
+}
+
+// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
+// See https://codereview.appspot.com/103300045/
+// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
+func TestConcurrentRemovalOfWatch(t *testing.T) {
+	if runtime.GOOS != "darwin" {
+		t.Skip("regression test for race only present on darwin")
+	}
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create a file before watching directory
+	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+	{
+		var f *os.File
+		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			t.Fatalf("creating test file failed: %s", err)
+		}
+		f.Sync()
+		f.Close()
+	}
+
+	watcher := newWatcher(t)
+	defer watcher.Close()
+
+	addWatch(t, watcher, testDir)
+
+	// Test that RemoveWatch can be invoked concurrently, with no data races.
+	removed1 := make(chan struct{})
+	go func() {
+		defer close(removed1)
+		watcher.Remove(testDir)
+	}()
+	removed2 := make(chan struct{})
+	go func() {
+		// NOTE(review): unlike removed1, this goroutine signals BEFORE calling
+		// Remove, so the test may return while this Remove is still in flight
+		// and the deferred Close then overlaps it — presumably intentional to
+		// widen the race window; confirm against the linked review.
+		close(removed2)
+		watcher.Remove(testDir)
+	}()
+	<-removed1
+	<-removed2
+}
+
+// TestClose checks that Close succeeds without error after a watch has been
+// added.
+func TestClose(t *testing.T) {
+	// Regression test for #59 bad file descriptor from Close
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	watcher := newWatcher(t)
+	if err := watcher.Add(testDir); err != nil {
+		t.Fatalf("Expected no error on Add, got %v", err)
+	}
+	err := watcher.Close()
+	if err != nil {
+		t.Fatalf("Expected no error on Close, got %v.", err)
+	}
+}
+
+// TestRemoveWithClose tests if one can handle Remove events and, at the same
+// time, close Watcher object without any data races.
+func TestRemoveWithClose(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Populate the directory so the removals below generate a burst of events.
+	const fileN = 200
+	tempFiles := make([]string, 0, fileN)
+	for i := 0; i < fileN; i++ {
+		tempFiles = append(tempFiles, tempMkFile(t, testDir))
+	}
+	watcher := newWatcher(t)
+	if err := watcher.Add(testDir); err != nil {
+		t.Fatalf("Expected no error on Add, got %v", err)
+	}
+	startC, stopC := make(chan struct{}), make(chan struct{})
+	errC := make(chan error)
+	// Drain both channels until stopC is closed so neither producer blocks.
+	go func() {
+		for {
+			select {
+			case <-watcher.Errors:
+			case <-watcher.Events:
+			case <-stopC:
+				return
+			}
+		}
+	}()
+	// Delete all files while, concurrently, Close is being called below.
+	go func() {
+		<-startC
+		for _, fileName := range tempFiles {
+			os.Remove(fileName)
+		}
+	}()
+	go func() {
+		<-startC
+		errC <- watcher.Close()
+	}()
+	// Release both goroutines at once to maximize overlap.
+	close(startC)
+	defer close(stopC)
+	if err := <-errC; err != nil {
+		t.Fatalf("Expected no error on Close, got %v.", err)
+	}
+}
+
+// testRename renames file1 to file2. On Windows and Plan 9 it uses os.Rename;
+// elsewhere it shells out to mv, presumably so the rename is performed by a
+// separate process — TODO confirm why this distinction matters for the watcher.
+func testRename(file1, file2 string) error {
+	switch runtime.GOOS {
+	case "windows", "plan9":
+		return os.Rename(file1, file2)
+	default:
+		cmd := exec.Command("mv", file1, file2)
+		return cmd.Run()
+	}
+}
diff --git a/vendor/github.com/gopherjs/gopherjs/build/build_test.go b/vendor/github.com/gopherjs/gopherjs/build/build_test.go
new file mode 100644
index 0000000..659aff3
--- /dev/null
+++ b/vendor/github.com/gopherjs/gopherjs/build/build_test.go
@@ -0,0 +1,199 @@
+package build
+
+import (
+ "fmt"
+ gobuild "go/build"
+ "go/token"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/kisielk/gotool"
+ "github.com/shurcooL/go/importgraphutil"
+)
+
+// Natives augment the standard library with GopherJS-specific changes.
+// This test ensures that none of the standard library packages are modified
+// in a way that adds imports which the original upstream standard library package
+// does not already import. Doing that can increase generated output size or cause
+// other unexpected issues (since the cmd/go tool does not know about these extra imports),
+// so it's best to avoid it.
+//
+// It checks all standard library packages. Each package is considered as a normal
+// package, as a test package, and as an external test package.
+func TestNativesDontImportExtraPackages(t *testing.T) {
+	// Calculate the forward import graph for all standard library packages.
+	// It's needed for populateImportSet.
+	stdOnly := gobuild.Default
+	stdOnly.GOPATH = "" // We only care about standard library, so skip all GOPATH packages.
+	forward, _, err := importgraphutil.BuildNoTests(&stdOnly)
+	if err != nil {
+		t.Fatalf("importgraphutil.BuildNoTests: %v", err)
+	}
+
+	// populateImportSet takes a slice of imports, and populates set with those
+	// imports, as well as their transitive dependencies. That way, the set can
+	// be quickly queried to check if a package is in the import graph of imports.
+	//
+	// Note, this does not include transitive imports of test/xtest packages,
+	// which could cause some false positives. It currently doesn't, but if it does,
+	// then support for that should be added here.
+	populateImportSet := func(imports []string, set *stringSet) {
+		for _, p := range imports {
+			(*set)[p] = struct{}{}
+			switch p {
+			case "sync":
+				// Whenever "sync" is allowed, also allow the nosync package —
+				// presumably because GopherJS natives substitute it for sync;
+				// confirm against the natives sources.
+				(*set)["github.com/gopherjs/gopherjs/nosync"] = struct{}{}
+			}
+			transitiveImports := forward.Search(p)
+			for p := range transitiveImports {
+				(*set)[p] = struct{}{}
+			}
+		}
+	}
+
+	// Check all standard library packages.
+	//
+	// The general strategy is to first import each standard library package using the
+	// normal build.Import, which returns a *build.Package. That contains Imports, TestImports,
+	// and XTestImports values that are considered the "real imports".
+	//
+	// That list of direct imports is then expanded to the transitive closure by populateImportSet,
+	// meaning all packages that are indirectly imported are also added to the set.
+	//
+	// Then, github.com/gopherjs/gopherjs/build.parseAndAugment(*build.Package) returns []*ast.File.
+	// Those augmented parsed Go files of the package are checked, one file at at time, one import
+	// at a time. Each import is verified to belong in the set of allowed real imports.
+	//
+	// The three sections below are near-identical; they differ only in which
+	// import list seeds the set and which files are inspected.
+	for _, pkg := range gotool.ImportPaths([]string{"std"}) {
+		// Normal package.
+		{
+			// Import the real normal package, and populate its real import set.
+			bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
+			if err != nil {
+				t.Fatalf("gobuild.Import: %v", err)
+			}
+			realImports := make(stringSet)
+			populateImportSet(bpkg.Imports, &realImports)
+
+			// Use parseAndAugment to get a list of augmented AST files.
+			fset := token.NewFileSet()
+			files, err := parseAndAugment(NewBuildContext("", nil), bpkg, false, fset)
+			if err != nil {
+				t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
+			}
+
+			// Verify imports of normal augmented AST files.
+			for _, f := range files {
+				fileName := fset.File(f.Pos()).Name()
+				normalFile := !strings.HasSuffix(fileName, "_test.go")
+				if !normalFile {
+					continue
+				}
+				for _, imp := range f.Imports {
+					importPath, err := strconv.Unquote(imp.Path.Value)
+					if err != nil {
+						t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
+					}
+					if importPath == "github.com/gopherjs/gopherjs/js" {
+						continue
+					}
+					if _, ok := realImports[importPath]; !ok {
+						t.Errorf("augmented normal package %q imports %q in file %v, but real %q doesn't:\nrealImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realImports)
+					}
+				}
+			}
+		}
+
+		// Test package.
+		{
+			// Import the real test package, and populate its real import set.
+			bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
+			if err != nil {
+				t.Fatalf("gobuild.Import: %v", err)
+			}
+			realTestImports := make(stringSet)
+			populateImportSet(bpkg.TestImports, &realTestImports)
+
+			// Use parseAndAugment to get a list of augmented AST files.
+			fset := token.NewFileSet()
+			files, err := parseAndAugment(NewBuildContext("", nil), bpkg, true, fset)
+			if err != nil {
+				t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
+			}
+
+			// Verify imports of test augmented AST files.
+			for _, f := range files {
+				fileName, pkgName := fset.File(f.Pos()).Name(), f.Name.String()
+				testFile := strings.HasSuffix(fileName, "_test.go") && !strings.HasSuffix(pkgName, "_test")
+				if !testFile {
+					continue
+				}
+				for _, imp := range f.Imports {
+					importPath, err := strconv.Unquote(imp.Path.Value)
+					if err != nil {
+						t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
+					}
+					if importPath == "github.com/gopherjs/gopherjs/js" {
+						continue
+					}
+					if _, ok := realTestImports[importPath]; !ok {
+						t.Errorf("augmented test package %q imports %q in file %v, but real %q doesn't:\nrealTestImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realTestImports)
+					}
+				}
+			}
+		}
+
+		// External test package.
+		{
+			// Import the real external test package, and populate its real import set.
+			bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
+			if err != nil {
+				t.Fatalf("gobuild.Import: %v", err)
+			}
+			realXTestImports := make(stringSet)
+			populateImportSet(bpkg.XTestImports, &realXTestImports)
+
+			// Add _test suffix to import path to cause parseAndAugment to use external test mode.
+			bpkg.ImportPath += "_test"
+
+			// Use parseAndAugment to get a list of augmented AST files, then check only the external test files.
+			fset := token.NewFileSet()
+			files, err := parseAndAugment(NewBuildContext("", nil), bpkg, true, fset)
+			if err != nil {
+				t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
+			}
+
+			// Verify imports of external test augmented AST files.
+			for _, f := range files {
+				fileName, pkgName := fset.File(f.Pos()).Name(), f.Name.String()
+				xTestFile := strings.HasSuffix(fileName, "_test.go") && strings.HasSuffix(pkgName, "_test")
+				if !xTestFile {
+					continue
+				}
+				for _, imp := range f.Imports {
+					importPath, err := strconv.Unquote(imp.Path.Value)
+					if err != nil {
+						t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
+					}
+					if importPath == "github.com/gopherjs/gopherjs/js" {
+						continue
+					}
+					if _, ok := realXTestImports[importPath]; !ok {
+						t.Errorf("augmented external test package %q imports %q in file %v, but real %q doesn't:\nrealXTestImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realXTestImports)
+					}
+				}
+			}
+		}
+	}
+}
+
+// stringSet is used to print a set of strings in a more readable way.
+type stringSet map[string]struct{}
+
+// String formats the set as a quoted slice of its elements. Order follows map
+// iteration and is therefore unspecified.
+func (m stringSet) String() string {
+	s := make([]string, 0, len(m))
+	for v := range m {
+		s = append(s, v)
+	}
+	return fmt.Sprintf("%q", s)
+}
diff --git a/vendor/github.com/kisielk/gotool/internal/load/match_test.go b/vendor/github.com/kisielk/gotool/internal/load/match_test.go
new file mode 100644
index 0000000..3c18474
--- /dev/null
+++ b/vendor/github.com/kisielk/gotool/internal/load/match_test.go
@@ -0,0 +1,167 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package load
+
+import (
+ "strings"
+ "testing"
+)
+
+// matchPatternTests is a line-oriented table consumed by testPatterns:
+// "pattern" sets the active patterns, "match"/"not" list names that must
+// (respectively must not) match every active pattern, and "#" starts a
+// comment that is stripped before parsing.
+var matchPatternTests = `
+	pattern ...
+	match foo
+
+	pattern net
+	match net
+	not net/http
+
+	pattern net/http
+	match net/http
+	not net
+
+	pattern net...
+	match net net/http netchan
+	not not/http not/net/http
+
+	# Special cases. Quoting docs:
+
+	# First, /... at the end of the pattern can match an empty string,
+	# so that net/... matches both net and packages in its subdirectories, like net/http.
+	pattern net/...
+	match net net/http
+	not not/http not/net/http netchan
+
+	# Second, any slash-separted pattern element containing a wildcard never
+	# participates in a match of the "vendor" element in the path of a vendored
+	# package, so that ./... does not match packages in subdirectories of
+	# ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do.
+	# Note, however, that a directory named vendor that itself contains code
+	# is not a vendored package: cmd/vendor would be a command named vendor,
+	# and the pattern cmd/... matches it.
+	pattern ./...
+	match ./vendor ./mycode/vendor
+	not ./vendor/foo ./mycode/vendor/foo
+
+	pattern ./vendor/...
+	match ./vendor/foo ./vendor/foo/vendor
+	not ./vendor/foo/vendor/bar
+
+	pattern mycode/vendor/...
+	match mycode/vendor mycode/vendor/foo mycode/vendor/foo/vendor
+	not mycode/vendor/foo/vendor/bar
+
+	pattern x/vendor/y
+	match x/vendor/y
+	not x/vendor
+
+	pattern x/vendor/y/...
+	match x/vendor/y x/vendor/y/z x/vendor/y/vendor x/vendor/y/z/vendor
+	not x/vendor/y/vendor/z
+
+	pattern .../vendor/...
+	match x/vendor/y x/vendor/y/z x/vendor/y/vendor x/vendor/y/z/vendor
+`
+
+// TestMatchPattern runs the matchPatternTests table against matchPattern.
+func TestMatchPattern(t *testing.T) {
+	testPatterns(t, "matchPattern", matchPatternTests, func(pattern, name string) bool {
+		return matchPattern(pattern)(name)
+	})
+}
+
+// treeCanMatchPatternTests is the testPatterns-format table (see
+// matchPatternTests) of cases for treeCanMatchPattern.
+var treeCanMatchPatternTests = `
+	pattern ...
+	match foo
+
+	pattern net
+	match net
+	not net/http
+
+	pattern net/http
+	match net net/http
+
+	pattern net...
+	match net netchan net/http
+	not not/http not/net/http
+
+	pattern net/...
+	match net net/http
+	not not/http netchan
+
+	pattern abc.../def
+	match abcxyz
+	not xyzabc
+
+	pattern x/y/z/...
+	match x x/y x/y/z x/y/z/w
+
+	pattern x/y/z
+	match x x/y x/y/z
+	not x/y/z/w
+
+	pattern x/.../y/z
+	match x/a/b/c
+	not y/x/a/b/c
+`
+
+// TestTreeCanMatchPattern runs the treeCanMatchPatternTests table against
+// treeCanMatchPattern.
+func TestTreeCanMatchPattern(t *testing.T) {
+	testPatterns(t, "treeCanMatchPattern", treeCanMatchPatternTests, func(pattern, name string) bool {
+		return treeCanMatchPattern(pattern)(name)
+	})
+}
+
+// hasPathPrefixTests lists cases asserting hasPathPrefix(in1, in2) == out.
+var hasPathPrefixTests = []stringPairTest{
+	{"abc", "a", false},
+	{"a/bc", "a", true},
+	{"a", "a", true},
+	{"a/bc", "a/", true},
+}
+
+// TestHasPathPrefix runs the hasPathPrefixTests table against hasPathPrefix.
+func TestHasPathPrefix(t *testing.T) {
+	testStringPairs(t, "hasPathPrefix", hasPathPrefixTests, hasPathPrefix)
+}
+
+// stringPairTest is a single (in1, in2) -> out case for a two-string predicate.
+type stringPairTest struct {
+	in1 string
+	in2 string
+	out bool
+}
+
+// testStringPairs runs each table case through f and reports mismatches,
+// labeling failures with name.
+func testStringPairs(t *testing.T, name string, tests []stringPairTest, f func(string, string) bool) {
+	for _, tt := range tests {
+		if out := f(tt.in1, tt.in2); out != tt.out {
+			t.Errorf("%s(%q, %q) = %v, want %v", name, tt.in1, tt.in2, out, tt.out)
+		}
+	}
+}
+
+// testPatterns parses a line-oriented test table: "#" starts a comment,
+// "pattern" replaces the active pattern set, and "match"/"not" assert that
+// each listed name does (or does not) satisfy fn for every active pattern.
+// Unknown directives abort the test.
+func testPatterns(t *testing.T, name, tests string, fn func(string, string) bool) {
+	var patterns []string
+	for _, line := range strings.Split(tests, "\n") {
+		// Strip trailing "#" comments before tokenizing.
+		if i := strings.Index(line, "#"); i >= 0 {
+			line = line[:i]
+		}
+		f := strings.Fields(line)
+		if len(f) == 0 {
+			continue
+		}
+		switch f[0] {
+		default:
+			t.Fatalf("unknown directive %q", f[0])
+		case "pattern":
+			patterns = f[1:]
+		case "match", "not":
+			want := f[0] == "match"
+			for _, pattern := range patterns {
+				for _, in := range f[1:] {
+					if fn(pattern, in) != want {
+						t.Errorf("%s(%q, %q) = %v, want %v", name, pattern, in, !want, want)
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/kisielk/gotool/match18_test.go b/vendor/github.com/kisielk/gotool/match18_test.go
new file mode 100644
index 0000000..59112ac
--- /dev/null
+++ b/vendor/github.com/kisielk/gotool/match18_test.go
@@ -0,0 +1,136 @@
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !go1.9
+
+package gotool
+
+import (
+ "sort"
+ "testing"
+)
+
+// This file contains code from the Go distribution.
+
+// matchPatternTests lists cases asserting matchPattern(in1)(in2) == out.
+// This is the struct-table variant used on Go < 1.9 (see the build tag above).
+var matchPatternTests = []stringPairTest{
+	{"...", "foo", true},
+	{"net", "net", true},
+	{"net", "net/http", false},
+	{"net/http", "net", false},
+	{"net/http", "net/http", true},
+	{"net...", "netchan", true},
+	{"net...", "net", true},
+	{"net...", "net/http", true},
+	{"net...", "not/http", false},
+	{"net/...", "netchan", false},
+	{"net/...", "net", true},
+	{"net/...", "net/http", true},
+	{"net/...", "not/http", false},
+}
+
+// TestMatchPattern runs the matchPatternTests table against matchPattern.
+func TestMatchPattern(t *testing.T) {
+	testStringPairs(t, "matchPattern", matchPatternTests, func(pattern, name string) bool {
+		return matchPattern(pattern)(name)
+	})
+}
+
+// treeCanMatchPatternTests lists cases asserting
+// treeCanMatchPattern(in1)(in2) == out.
+var treeCanMatchPatternTests = []stringPairTest{
+	{"...", "foo", true},
+	{"net", "net", true},
+	{"net", "net/http", false},
+	{"net/http", "net", true},
+	{"net/http", "net/http", true},
+	{"net...", "netchan", true},
+	{"net...", "net", true},
+	{"net...", "net/http", true},
+	{"net...", "not/http", false},
+	{"net/...", "netchan", false},
+	{"net/...", "net", true},
+	{"net/...", "net/http", true},
+	{"net/...", "not/http", false},
+	{"abc.../def", "abcxyz", true},
+	{"abc.../def", "xyxabc", false},
+	{"x/y/z/...", "x", true},
+	{"x/y/z/...", "x/y", true},
+	{"x/y/z/...", "x/y/z", true},
+	{"x/y/z/...", "x/y/z/w", true},
+	{"x/y/z", "x", true},
+	{"x/y/z", "x/y", true},
+	{"x/y/z", "x/y/z", true},
+	{"x/y/z", "x/y/z/w", false},
+	{"x/.../y/z", "x/a/b/c", true},
+	{"x/.../y/z", "y/x/a/b/c", false},
+}
+
+// TestChildrenCanMatchPattern runs the treeCanMatchPatternTests table against
+// treeCanMatchPattern.
+func TestChildrenCanMatchPattern(t *testing.T) {
+	testStringPairs(t, "treeCanMatchPattern", treeCanMatchPatternTests, func(pattern, name string) bool {
+		return treeCanMatchPattern(pattern)(name)
+	})
+}
+
+// hasPathPrefixTests lists cases asserting hasPathPrefix(in1, in2) == out.
+var hasPathPrefixTests = []stringPairTest{
+	{"abc", "a", false},
+	{"a/bc", "a", true},
+	{"a", "a", true},
+	{"a/bc", "a/", true},
+}
+
+// TestHasPathPrefix runs the hasPathPrefixTests table against hasPathPrefix.
+func TestHasPathPrefix(t *testing.T) {
+	testStringPairs(t, "hasPathPrefix", hasPathPrefixTests, hasPathPrefix)
+}
+
+// stringPairTest is a single (in1, in2) -> out case for a two-string predicate.
+type stringPairTest struct {
+	in1 string
+	in2 string
+	out bool
+}
+
+// testStringPairs runs each table case through f and reports mismatches,
+// labeling failures with name.
+func testStringPairs(t *testing.T, name string, tests []stringPairTest, f func(string, string) bool) {
+	for _, tt := range tests {
+		if out := f(tt.in1, tt.in2); out != tt.out {
+			t.Errorf("%s(%q, %q) = %v, want %v", name, tt.in1, tt.in2, out, tt.out)
+		}
+	}
+}
+
+// containsString reports whether strings contains x. strings is assumed to be sorted.
+// NOTE(review): sort.SearchStrings returns len(strings) when x sorts after
+// every element, so this indexes out of range (panics) in that case or when
+// strings is empty; the lookups in TestMatchStdPackages expect x to be present.
+func containsString(strings []string, x string) bool {
+	return strings[sort.SearchStrings(strings, x)] == x
+}
+
+// TestMatchStdPackages checks that matchPackages("std") includes a handful of
+// packages every supported Go version ships.
+func TestMatchStdPackages(t *testing.T) {
+	packages := DefaultContext.matchPackages("std")
+	sort.Strings(packages) // containsString requires sorted input
+	// some common packages all Go versions should have
+	commonPackages := []string{"bufio", "bytes", "crypto", "fmt", "io", "os"}
+	for _, p := range commonPackages {
+		if !containsString(packages, p) {
+			t.Errorf("std package set doesn't contain expected package %s", p)
+		}
+	}
+}
diff --git a/vendor/github.com/neelance/astrewrite/simplify_test.go b/vendor/github.com/neelance/astrewrite/simplify_test.go
new file mode 100644
index 0000000..49062df
--- /dev/null
+++ b/vendor/github.com/neelance/astrewrite/simplify_test.go
@@ -0,0 +1,190 @@
+package astrewrite
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "testing"
+)
+
+func TestSimplify(t *testing.T) {
+ simplifyAndCompareStmts(t, "-a()", "_1 := a(); -_1")
+ simplifyAndCompareStmts(t, "a() + b()", "_1 := a(); _2 := b(); _1 + _2")
+ simplifyAndCompareStmts(t, "f(g(), h())", "_1 := g(); _2 := h(); f(_1, _2)")
+ simplifyAndCompareStmts(t, "f().x", "_1 := f(); _1.x")
+ simplifyAndCompareStmts(t, "f()()", "_1 := f(); _1()")
+ simplifyAndCompareStmts(t, "x.f()", "x.f()")
+ simplifyAndCompareStmts(t, "f()[g()]", "_1 := f(); _2 := g(); _1[_2]")
+ simplifyAndCompareStmts(t, "f()[g():h()]", "_1 := f(); _2 := g(); _3 := h(); _1[_2:_3]")
+ simplifyAndCompareStmts(t, "f()[g():h():i()]", "_1 := f(); _2 := g(); _3 := h(); _4 := i(); _1[_2:_3:_4]")
+ simplifyAndCompareStmts(t, "*f()", "_1 := f(); *_1")
+ simplifyAndCompareStmts(t, "f().(t)", "_1 := f(); _1.(t)")
+ simplifyAndCompareStmts(t, "func() { -a() }", "func() { _1 := a(); -_1 }")
+ simplifyAndCompareStmts(t, "T{a(), b()}", "_1 := a(); _2 := b(); T{_1, _2}")
+ simplifyAndCompareStmts(t, "T{A: a(), B: b()}", "_1 := a(); _2 := b(); T{A: _1, B: _2}")
+ simplifyAndCompareStmts(t, "func() { a()() }", "func() { _1 := a(); _1() }")
+
+ simplifyAndCompareStmts(t, "a() && b", "_1 := a(); _1 && b")
+ simplifyAndCompareStmts(t, "a && b()", "_1 := a; if _1 { _1 = b() }; _1")
+ simplifyAndCompareStmts(t, "a() && b()", "_1 := a(); if _1 { _1 = b() }; _1")
+
+ simplifyAndCompareStmts(t, "a() || b", "_1 := a(); _1 || b")
+ simplifyAndCompareStmts(t, "a || b()", "_1 := a; if !_1 { _1 = b() }; _1")
+ simplifyAndCompareStmts(t, "a() || b()", "_1 := a(); if !_1 { _1 = b() }; _1")
+
+ simplifyAndCompareStmts(t, "a && (b || c())", "_1 := a; if(_1) { _2 := b; if(!_2) { _2 = c() }; _1 = (_2) }; _1")
+
+ simplifyAndCompareStmts(t, "a := b()()", "_1 := b(); a := _1()")
+ simplifyAndCompareStmts(t, "a().f = b", "_1 := a(); _1.f = b")
+ simplifyAndCompareStmts(t, "var a int = b()", "_1 := b(); var a int = _1")
+
+ simplifyAndCompareStmts(t, "if a() { b }", "_1 := a(); if _1 { b }")
+ simplifyAndCompareStmts(t, "if a := b(); a { c }", "{ a := b(); if a { c } }")
+ simplifyAndCompareStmts(t, "if a { b()() }", "if a { _1 := b(); _1() }")
+ simplifyAndCompareStmts(t, "if a { b } else { c()() }", "if a { b } else { _1 := c(); _1() }")
+ simplifyAndCompareStmts(t, "if a { b } else if c { d()() }", "if a { b } else if c { _1 := d(); _1() }")
+ simplifyAndCompareStmts(t, "if a { b } else if c() { d }", "if a { b } else { _1 := c(); if _1 { d } }")
+ simplifyAndCompareStmts(t, "if a { b } else if c := d(); c { e }", "if a { b } else { c := d(); if c { e } }")
+
+ simplifyAndCompareStmts(t, "l: switch a { case b, c: d()() }", "l: switch { default: _1 := a; if _1 == (b) || _1 == (c) { _2 := d(); _2() } }")
+ simplifyAndCompareStmts(t, "switch a() { case b: c }", "switch { default: _1 := a(); if _1 == (b) { c } }")
+ simplifyAndCompareStmts(t, "switch x := a(); x { case b, c: d }", "switch { default: x := a(); _1 := x; if _1 == (b) || _1 == (c) { d } }")
+ simplifyAndCompareStmts(t, "switch a() { case b: c; default: e; case c: d }", "switch { default: _1 := a(); if _1 == (b) { c } else if _1 == (c) { d } else { e } }")
+ simplifyAndCompareStmts(t, "switch a { case b(): c }", "switch { default: _1 := a; _2 := b(); if _1 == (_2) { c } }")
+ simplifyAndCompareStmts(t, "switch a { default: d; fallthrough; case b: c }", "switch { default: _1 := a; if _1 == (b) { c } else { d; c } }")
+ simplifyAndCompareStmts(t, "switch a := 0; a {}", "switch { default: a := 0; _ = a }")
+ simplifyAndCompareStmts(t, "switch a := 0; a { default: }", "switch { default: a := 0; _ = a }")
+
+ simplifyAndCompareStmts(t, "switch a().(type) { case b, c: d }", "_1 := a(); switch _1.(type) { case b, c: d }")
+ simplifyAndCompareStmts(t, "switch x := a(); x.(type) { case b: c }", "{ x := a(); switch x.(type) { case b: c } }")
+ simplifyAndCompareStmts(t, "switch a := b().(type) { case c: d }", "_1 := b(); switch a := _1.(type) { case c: d }")
+ simplifyAndCompareStmts(t, "switch a.(type) { case b, c: d()() }", "switch a.(type) { case b, c: _1 := d(); _1() }")
+
+ simplifyAndCompareStmts(t, "for a { b()() }", "for a { _1 := b(); _1() }")
+ // simplifyAndCompareStmts(t, "for a() { b() }", "for { _1 := a(); if !_1 { break }; b() }")
+
+ simplifyAndCompareStmts(t, "select { case <-a: b()(); default: c()() }", "select { case <-a: _1 := b(); _1(); default: _2 := c(); _2() }")
+ simplifyAndCompareStmts(t, "select { case <-a(): b; case <-c(): d }", "_1 := a(); _2 := c(); select { case <-_1: b; case <-_2: d }")
+ simplifyAndCompareStmts(t, "var d int; select { case a().f, a().g = <-b(): c; case d = <-e(): f }", "var d int; _5 := b(); _6 := e(); select { case _1, _3 := <-_5: _2 := a(); _2.f = _1; _4 := a(); _4.g = _3; c; case d = <-_6: f }")
+ simplifyAndCompareStmts(t, "select { case a() <- b(): c; case d() <- e(): f }", "_1 := a(); _2 := b(); _3 := d(); _4 := e(); select { case _1 <- _2: c; case _3 <- _4: f }")
+
+ simplifyAndCompareStmts(t, "a().f++", "_1 := a(); _1.f++")
+ simplifyAndCompareStmts(t, "return a()", "_1 := a(); return _1")
+ simplifyAndCompareStmts(t, "go a()()", "_1 := a(); go _1()")
+ simplifyAndCompareStmts(t, "defer a()()", "_1 := a(); defer _1()")
+ simplifyAndCompareStmts(t, "a() <- b", "_1 := a(); _1 <- b")
+ simplifyAndCompareStmts(t, "a <- b()", "_1 := b(); a <- _1")
+
+ for _, name := range []string{"var", "tuple", "range"} {
+ fset := token.NewFileSet()
+ inFile, err := parser.ParseFile(fset, fmt.Sprintf("testdata/%s.go", name), nil, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ typesInfo := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ }
+ config := &types.Config{
+ Importer: importer.Default(),
+ }
+ if _, err := config.Check("main", fset, []*ast.File{inFile}, typesInfo); err != nil {
+ t.Fatal(err)
+ }
+
+ outFile := Simplify(inFile, typesInfo, true)
+ got := fprint(t, fset, outFile)
+ expected, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.expected.go", name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != string(expected) {
+ t.Errorf("expected:\n%s\n--- got:\n%s\n", string(expected), got)
+ }
+ }
+}
+
+func simplifyAndCompareStmts(t *testing.T, in, out string) {
+ inFile := "package main; func main() { " + in + " }"
+ outFile := "package main; func main() { " + out + " }"
+ simplifyAndCompare(t, inFile, outFile)
+ simplifyAndCompare(t, outFile, outFile)
+}
+
+func simplifyAndCompare(t *testing.T, in, out string) {
+ fset := token.NewFileSet()
+
+ expected := fprint(t, fset, parse(t, fset, out))
+
+ inFile := parse(t, fset, in)
+ typesInfo := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ }
+ outFile := Simplify(inFile, typesInfo, true)
+ got := fprint(t, fset, outFile)
+
+ if got != expected {
+ t.Errorf("\n--- input:\n%s\n--- expected output:\n%s\n--- got:\n%s\n", in, expected, got)
+ }
+}
+
+func parse(t *testing.T, fset *token.FileSet, body string) *ast.File {
+ file, err := parser.ParseFile(fset, "", body, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return file
+}
+
+func fprint(t *testing.T, fset *token.FileSet, file *ast.File) string {
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, fset, file); err != nil {
+ t.Fatal(err)
+ }
+ return buf.String()
+}
+
+func TestContainsCall(t *testing.T) {
+ testContainsCall(t, "a", false)
+ testContainsCall(t, "a()", true)
+ testContainsCall(t, "T{a, b}", false)
+ testContainsCall(t, "T{a, b()}", true)
+ testContainsCall(t, "T{a: a, b: b()}", true)
+ testContainsCall(t, "(a())", true)
+ testContainsCall(t, "a().f", true)
+ testContainsCall(t, "a()[b]", true)
+ testContainsCall(t, "a[b()]", true)
+ testContainsCall(t, "a()[:]", true)
+ testContainsCall(t, "a[b():]", true)
+ testContainsCall(t, "a[:b()]", true)
+ testContainsCall(t, "a[:b:c()]", true)
+ testContainsCall(t, "a().(T)", true)
+ testContainsCall(t, "*a()", true)
+ testContainsCall(t, "-a()", true)
+ testContainsCall(t, "&a()", true)
+ testContainsCall(t, "&a()", true)
+ testContainsCall(t, "a() + b", true)
+ testContainsCall(t, "a + b()", true)
+}
+
+func testContainsCall(t *testing.T, in string, expected bool) {
+ x, err := parser.ParseExpr(in)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := ContainsCall(x); got != expected {
+ t.Errorf("ContainsCall(%s): expected %t, got %t", in, expected, got)
+ }
+}
diff --git a/vendor/github.com/neelance/sourcemap/sourcemap_test.go b/vendor/github.com/neelance/sourcemap/sourcemap_test.go
new file mode 100644
index 0000000..4d09dad
--- /dev/null
+++ b/vendor/github.com/neelance/sourcemap/sourcemap_test.go
@@ -0,0 +1,60 @@
+package sourcemap
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+const testFile = `{"version":3,"file":"min.js","sourceRoot":"/the/root","sources":["one.js","two.js"],"names":["bar","baz","n"],"mappings":"CAAC,IAAI,IAAM,SAAUA,GAClB,OAAOC,IAAID;CCDb,IAAI,IAAM,SAAUE,GAClB,OAAOA"}` + "\n"
+
+func TestReadFrom(t *testing.T) {
+ m, err := ReadFrom(strings.NewReader(testFile))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.File != "min.js" || m.SourceRoot != "/the/root" || len(m.Sources) != 2 || m.Sources[0] != "one.js" || len(m.Names) != 3 || m.Names[0] != "bar" {
+ t.Error(m)
+ }
+ mappings := m.DecodedMappings()
+ if len(mappings) != 13 {
+ t.Error(m)
+ }
+ assertMapping := func(got, expected *Mapping) {
+ if got.GeneratedLine != expected.GeneratedLine || got.GeneratedColumn != expected.GeneratedColumn || got.OriginalFile != expected.OriginalFile || got.OriginalLine != expected.OriginalLine || got.OriginalColumn != expected.OriginalColumn || got.OriginalName != expected.OriginalName {
+ t.Errorf("expected %v, got %v", expected, got)
+ }
+ }
+ assertMapping(mappings[0], &Mapping{1, 1, "one.js", 1, 1, ""})
+ assertMapping(mappings[1], &Mapping{1, 5, "one.js", 1, 5, ""})
+ assertMapping(mappings[2], &Mapping{1, 9, "one.js", 1, 11, ""})
+ assertMapping(mappings[3], &Mapping{1, 18, "one.js", 1, 21, "bar"})
+ assertMapping(mappings[4], &Mapping{1, 21, "one.js", 2, 3, ""})
+ assertMapping(mappings[5], &Mapping{1, 28, "one.js", 2, 10, "baz"})
+ assertMapping(mappings[6], &Mapping{1, 32, "one.js", 2, 14, "bar"})
+ assertMapping(mappings[7], &Mapping{2, 1, "two.js", 1, 1, ""})
+ assertMapping(mappings[8], &Mapping{2, 5, "two.js", 1, 5, ""})
+ assertMapping(mappings[9], &Mapping{2, 9, "two.js", 1, 11, ""})
+ assertMapping(mappings[10], &Mapping{2, 18, "two.js", 1, 21, "n"})
+ assertMapping(mappings[11], &Mapping{2, 21, "two.js", 2, 3, ""})
+ assertMapping(mappings[12], &Mapping{2, 28, "two.js", 2, 10, "n"})
+}
+
+func TestWriteTo(t *testing.T) {
+ m, err := ReadFrom(strings.NewReader(testFile))
+ if err != nil {
+ t.Fatal(err)
+ }
+ m.DecodedMappings()
+ m.Swap(3, 4)
+ m.Swap(5, 10)
+ m.Mappings = ""
+ m.Version = 0
+ b := bytes.NewBuffer(nil)
+ if err := m.WriteTo(b); err != nil {
+ t.Fatal(err)
+ }
+ if b.String() != testFile {
+ t.Error(b.String())
+ }
+}
diff --git a/vendor/github.com/shurcooL/httpfs/filter/filter_test.go b/vendor/github.com/shurcooL/httpfs/filter/filter_test.go
new file mode 100644
index 0000000..127e95b
--- /dev/null
+++ b/vendor/github.com/shurcooL/httpfs/filter/filter_test.go
@@ -0,0 +1,117 @@
+package filter_test
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ pathpkg "path"
+ "strings"
+
+ "github.com/shurcooL/httpfs/filter"
+ "github.com/shurcooL/httpfs/vfsutil"
+ "golang.org/x/tools/godoc/vfs/httpfs"
+ "golang.org/x/tools/godoc/vfs/mapfs"
+)
+
+func ExampleKeep() {
+ var srcFS http.FileSystem
+
+ // Keep only "/target/dir" and its contents.
+ fs := filter.Keep(srcFS, func(path string, fi os.FileInfo) bool {
+ return path == "/" ||
+ path == "/target" ||
+ path == "/target/dir" ||
+ strings.HasPrefix(path, "/target/dir/")
+ })
+
+ _ = fs
+}
+
+func ExampleSkip() {
+ var srcFS http.FileSystem
+
+ // Skip all files named ".DS_Store".
+ fs := filter.Skip(srcFS, func(path string, fi os.FileInfo) bool {
+ return !fi.IsDir() && fi.Name() == ".DS_Store"
+ })
+
+ _ = fs
+}
+
+func Example_detailed() {
+ srcFS := httpfs.New(mapfs.New(map[string]string{
+ "zzz-last-file.txt": "It should be visited last.",
+ "a-file.txt": "It has stuff.",
+ "another-file.txt": "Also stuff.",
+ "some-file.html": "and stuff",
+ "folderA/entry-A.txt": "Alpha.",
+ "folderA/entry-B.txt": "Beta.",
+ "folderA/main.go": "package main\n",
+ "folderA/folder-to-skip/many.txt": "Entire folder can be skipped.",
+ "folderA/folder-to-skip/files.txt": "Entire folder can be skipped.",
+ "folder-to-skip": "This is a file, not a folder, and shouldn't be skipped.",
+ }))
+
+ // Skip files with .go and .html extensions, and directories named "folder-to-skip" (but
+ // not files named "folder-to-skip").
+ fs := filter.Skip(srcFS, func(path string, fi os.FileInfo) bool {
+ return pathpkg.Ext(fi.Name()) == ".go" || pathpkg.Ext(fi.Name()) == ".html" ||
+ (fi.IsDir() && fi.Name() == "folder-to-skip")
+ })
+
+ err := vfsutil.Walk(fs, "/", func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ log.Printf("can't stat file %s: %v\n", path, err)
+ return nil
+ }
+ fmt.Println(path)
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println()
+
+ // This file should be filtered out, even if accessed directly.
+ _, err = fs.Open("/folderA/main.go")
+ fmt.Println("os.IsNotExist(err):", os.IsNotExist(err))
+ fmt.Println(err)
+
+ fmt.Println()
+
+ // This folder should be filtered out, even if accessed directly.
+ _, err = fs.Open("/folderA/folder-to-skip")
+ fmt.Println("os.IsNotExist(err):", os.IsNotExist(err))
+ fmt.Println(err)
+
+ fmt.Println()
+
+ // This file should not be filtered out.
+ f, err := fs.Open("/folder-to-skip")
+ if err != nil {
+ panic(err)
+ }
+ io.Copy(os.Stdout, f)
+ f.Close()
+
+ // Output:
+ // /
+ // /a-file.txt
+ // /another-file.txt
+ // /folder-to-skip
+ // /folderA
+ // /folderA/entry-A.txt
+ // /folderA/entry-B.txt
+ // /zzz-last-file.txt
+ //
+ // os.IsNotExist(err): true
+ // open /folderA/main.go: file does not exist
+ //
+ // os.IsNotExist(err): true
+ // open /folderA/folder-to-skip: file does not exist
+ //
+ // This is a file, not a folder, and shouldn't be skipped.
+}
diff --git a/vendor/github.com/shurcooL/httpfs/vfsutil/walk_test.go b/vendor/github.com/shurcooL/httpfs/vfsutil/walk_test.go
new file mode 100644
index 0000000..181af81
--- /dev/null
+++ b/vendor/github.com/shurcooL/httpfs/vfsutil/walk_test.go
@@ -0,0 +1,93 @@
+package vfsutil_test
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+
+ "github.com/shurcooL/httpfs/vfsutil"
+ "golang.org/x/tools/godoc/vfs/httpfs"
+ "golang.org/x/tools/godoc/vfs/mapfs"
+)
+
+func ExampleWalk() {
+ var fs http.FileSystem = httpfs.New(mapfs.New(map[string]string{
+ "zzz-last-file.txt": "It should be visited last.",
+ "a-file.txt": "It has stuff.",
+ "another-file.txt": "Also stuff.",
+ "folderA/entry-A.txt": "Alpha.",
+ "folderA/entry-B.txt": "Beta.",
+ }))
+
+ walkFn := func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ log.Printf("can't stat file %s: %v\n", path, err)
+ return nil
+ }
+ fmt.Println(path)
+ return nil
+ }
+
+ err := vfsutil.Walk(fs, "/", walkFn)
+ if err != nil {
+ panic(err)
+ }
+
+ // Output:
+ // /
+ // /a-file.txt
+ // /another-file.txt
+ // /folderA
+ // /folderA/entry-A.txt
+ // /folderA/entry-B.txt
+ // /zzz-last-file.txt
+}
+
+func ExampleWalkFiles() {
+ var fs http.FileSystem = httpfs.New(mapfs.New(map[string]string{
+ "zzz-last-file.txt": "It should be visited last.",
+ "a-file.txt": "It has stuff.",
+ "another-file.txt": "Also stuff.",
+ "folderA/entry-A.txt": "Alpha.",
+ "folderA/entry-B.txt": "Beta.",
+ }))
+
+ walkFn := func(path string, fi os.FileInfo, r io.ReadSeeker, err error) error {
+ if err != nil {
+ log.Printf("can't stat file %s: %v\n", path, err)
+ return nil
+ }
+ fmt.Println(path)
+ if !fi.IsDir() {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ log.Printf("can't read file %s: %v\n", path, err)
+ return nil
+ }
+ fmt.Printf("%q\n", b)
+ }
+ return nil
+ }
+
+ err := vfsutil.WalkFiles(fs, "/", walkFn)
+ if err != nil {
+ panic(err)
+ }
+
+ // Output:
+ // /
+ // /a-file.txt
+ // "It has stuff."
+ // /another-file.txt
+ // "Also stuff."
+ // /folderA
+ // /folderA/entry-A.txt
+ // "Alpha."
+ // /folderA/entry-B.txt
+ // "Beta."
+ // /zzz-last-file.txt
+ // "It should be visited last."
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go
new file mode 100644
index 0000000..29454cb
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go
@@ -0,0 +1,281 @@
+package cipher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher/base58"
+)
+
+func TestMustDecodeBase58Address(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ require.NoError(t, a.Verify(p))
+
+ require.Panics(t, func() { MustDecodeBase58Address("") })
+ require.Panics(t, func() { MustDecodeBase58Address("cascs") })
+ b := a.Bytes()
+ h := string(base58.Hex2Base58(b[:len(b)/2]))
+ require.Panics(t, func() { MustDecodeBase58Address(h) })
+ h = string(base58.Hex2Base58(b))
+ require.NotPanics(t, func() { MustDecodeBase58Address(h) })
+ a2 := MustDecodeBase58Address(h)
+ require.Equal(t, a, a2)
+
+ require.NotPanics(t, func() { MustDecodeBase58Address(a.String()) })
+ a2 = MustDecodeBase58Address(a.String())
+ require.Equal(t, a, a2)
+
+ // preceding whitespace is invalid
+ badAddr := " " + a.String()
+ require.Panics(t, func() { MustDecodeBase58Address(badAddr) })
+
+ // preceding zeroes are invalid
+ badAddr = "000" + a.String()
+ require.Panics(t, func() { MustDecodeBase58Address(badAddr) })
+
+ // trailing whitespace is invalid
+ badAddr = a.String() + " "
+ require.Panics(t, func() { MustDecodeBase58Address(badAddr) })
+
+ // trailing zeroes are invalid
+ badAddr = a.String() + "000"
+ require.Panics(t, func() { MustDecodeBase58Address(badAddr) })
+}
+
+func TestDecodeBase58Address(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ require.NoError(t, a.Verify(p))
+
+ a2, err := DecodeBase58Address("")
+ require.Error(t, err)
+ a2, err = DecodeBase58Address("cascs")
+ require.Error(t, err)
+ b := a.Bytes()
+ h := string(base58.Hex2Base58(b[:len(b)/2]))
+ a2, err = DecodeBase58Address(h)
+ require.Error(t, err)
+ h = string(base58.Hex2Base58(b))
+ a2, err = DecodeBase58Address(h)
+ require.NoError(t, err)
+ require.Equal(t, a, a2)
+
+ as := a.String()
+ a2, err = DecodeBase58Address(as)
+ require.NoError(t, err)
+ require.Equal(t, a, a2)
+
+ // preceding whitespace is invalid
+ as2 := " " + as
+ _, err = DecodeBase58Address(as2)
+ require.Error(t, err)
+
+ // preceding zeroes are invalid
+ as2 = "000" + as
+ _, err = DecodeBase58Address(as2)
+ require.Error(t, err)
+
+ // trailing whitespace is invalid
+ as2 = as + " "
+ _, err = DecodeBase58Address(as2)
+ require.Error(t, err)
+
+ // trailing zeroes are invalid
+ as2 = as + "000"
+ _, err = DecodeBase58Address(as2)
+ require.Error(t, err)
+}
+
+func TestAddressFromBytes(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ a2, err := AddressFromBytes(a.Bytes())
+ require.NoError(t, err)
+ require.Equal(t, a2, a)
+
+ // Invalid number of bytes
+ b := a.Bytes()
+ _, err = AddressFromBytes(b[:len(b)-2])
+ require.EqualError(t, err, "Invalid address length")
+
+ // Invalid checksum
+ b[len(b)-1] += byte(1)
+ _, err = AddressFromBytes(b)
+ require.EqualError(t, err, "Invalid checksum")
+
+ a.Version = 2
+ b = a.Bytes()
+ _, err = AddressFromBytes(b)
+ require.EqualError(t, err, "Invalid version")
+}
+
+func TestBitcoinAddressFromBytes(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ a2, err := BitcoinAddressFromBytes(a.BitcoinBytes())
+ require.NoError(t, err)
+ require.Equal(t, a2, a)
+
+ // Invalid number of bytes
+ b := a.BitcoinBytes()
+ _, err = BitcoinAddressFromBytes(b[:len(b)-2])
+ require.EqualError(t, err, "Invalid address length")
+
+ // Invalid checksum
+ b[len(b)-1] += byte(1)
+ _, err = BitcoinAddressFromBytes(b)
+ require.EqualError(t, err, "Invalid checksum")
+
+ a.Version = 2
+ b = a.BitcoinBytes()
+ _, err = BitcoinAddressFromBytes(b)
+ require.EqualError(t, err, "Invalid version")
+}
+
+func TestAddressRoundtrip(t *testing.T) {
+ // Tests encode and decode
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ a2, err := AddressFromBytes(a.Bytes())
+ require.NoError(t, err)
+ require.Equal(t, a, a2)
+ require.Equal(t, a.String(), a2.String())
+}
+
+func TestAddressVerify(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ // Valid pubkey+address
+ require.NoError(t, a.Verify(p))
+ // Invalid pubkey
+ require.Error(t, a.Verify(PubKey{}))
+ p2, _ := GenerateKeyPair()
+ require.Error(t, a.Verify(p2))
+ // Bad version
+ a.Version = 0x01
+ require.Error(t, a.Verify(p))
+}
+
+func TestAddressString(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ s := a.String()
+ a2, err := DecodeBase58Address(s)
+ require.NoError(t, err)
+ require.Equal(t, a2, a)
+ s2 := a2.String()
+ a3, err := DecodeBase58Address(s2)
+ require.NoError(t, err)
+ require.Equal(t, a2, a3)
+}
+
+func TestBitcoinAddress1(t *testing.T) {
+ seckey := MustSecKeyFromHex("1111111111111111111111111111111111111111111111111111111111111111")
+ pubkey := PubKeyFromSecKey(seckey)
+ pubkeyStr := "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
+ require.Equal(t, pubkeyStr, pubkey.Hex())
+ bitcoinStr := "1Q1pE5vPGEEMqRcVRMbtBK842Y6Pzo6nK9"
+ bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
+ require.Equal(t, bitcoinStr, bitcoinAddr)
+}
+
+func TestBitcoinAddress2(t *testing.T) {
+ seckey := MustSecKeyFromHex("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
+ pubkey := PubKeyFromSecKey(seckey)
+ pubkeyStr := "02ed83704c95d829046f1ac27806211132102c34e9ac7ffa1b71110658e5b9d1bd"
+ require.Equal(t, pubkeyStr, pubkey.Hex())
+ bitcoinStr := "1NKRhS7iYUGTaAfaR5z8BueAJesqaTyc4a"
+ bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
+ require.Equal(t, bitcoinStr, bitcoinAddr)
+}
+
+func TestBitcoinAddress3(t *testing.T) {
+ seckey := MustSecKeyFromHex("47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012")
+ pubkey := PubKeyFromSecKey(seckey)
+ pubkeyStr := "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3"
+ require.Equal(t, pubkeyStr, pubkey.Hex())
+ bitcoinStr := "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV"
+ bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
+ require.Equal(t, bitcoinStr, bitcoinAddr)
+}
+
+func TestBitcoinWIPRoundTrio(t *testing.T) {
+
+ _, seckey1 := GenerateKeyPair()
+ wip1 := BitcoinWalletImportFormatFromSeckey(seckey1)
+ seckey2, err := SecKeyFromWalletImportFormat(wip1)
+ wip2 := BitcoinWalletImportFormatFromSeckey(seckey2)
+
+ require.NoError(t, err)
+ require.Equal(t, seckey1, seckey2)
+ require.Equal(t, seckey1.Hex(), seckey2.Hex())
+ require.Equal(t, wip1, wip2)
+
+}
+
+func TestBitcoinWIP(t *testing.T) {
+	//wallet import format (WIF) string
+ var wip = []string{
+ "KwntMbt59tTsj8xqpqYqRRWufyjGunvhSyeMo3NTYpFYzZbXJ5Hp",
+ "L4ezQvyC6QoBhxB4GVs9fAPhUKtbaXYUn8YTqoeXwbevQq4U92vN",
+ "KydbzBtk6uc7M6dXwEgTEH2sphZxSPbmDSz6kUUHi4eUpSQuhEbq",
+ }
+ //the expected pubkey to generate
+ var pub = []string{
+ "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa",
+ "02ed83704c95d829046f1ac27806211132102c34e9ac7ffa1b71110658e5b9d1bd",
+ "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3",
+ }
+	//the expected address to generate
+ var addr = []string{
+ "1Q1pE5vPGEEMqRcVRMbtBK842Y6Pzo6nK9",
+ "1NKRhS7iYUGTaAfaR5z8BueAJesqaTyc4a",
+ "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV",
+ }
+
+ for i := range wip {
+ seckey, err := SecKeyFromWalletImportFormat(wip[i])
+ require.Equal(t, nil, err)
+ _ = MustSecKeyFromWalletImportFormat(wip[i])
+ pubkey := PubKeyFromSecKey(seckey)
+ require.Equal(t, pub[i], pubkey.Hex())
+ bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
+ require.Equal(t, addr[i], bitcoinAddr)
+ }
+
+ /*
+ seckey := MustSecKeyFromHex("47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012")
+ pubkey := PubKeyFromSecKey(seckey)
+ pubkey_str := "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3"
+ require.Equal(t, pubkey_str, pubkey.Hex())
+ bitcoin_str := "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV"
+ bitcoin_addr := BitcoinAddressFromPubkey(pubkey)
+ require.Equal(t, bitcoin_str, bitcoin_addr)
+ */
+}
+
+func TestAddressBulk(t *testing.T) {
+
+ for i := 0; i < 1024; i++ {
+ pub, _ := GenerateDeterministicKeyPair(RandByte(32))
+
+ a := AddressFromPubKey(pub)
+ require.NoError(t, a.Verify(pub))
+ s := a.String()
+ a2, err := DecodeBase58Address(s)
+ require.NoError(t, err)
+ require.Equal(t, a2, a)
+
+ }
+}
+
+func TestAddressNull(t *testing.T) {
+ var a Address
+ require.True(t, a.Null())
+
+ p, _ := GenerateKeyPair()
+ a = AddressFromPubKey(p)
+ require.False(t, a.Null())
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go
new file mode 100644
index 0000000..91ff956
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go
@@ -0,0 +1,344 @@
+package cipher
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher/ripemd160"
+)
+
+func TestNewPubKey(t *testing.T) {
+ assert.Panics(t, func() { NewPubKey(randBytes(t, 31)) })
+ assert.Panics(t, func() { NewPubKey(randBytes(t, 32)) })
+ assert.Panics(t, func() { NewPubKey(randBytes(t, 34)) })
+ assert.Panics(t, func() { NewPubKey(randBytes(t, 0)) })
+ assert.Panics(t, func() { NewPubKey(randBytes(t, 100)) })
+ assert.NotPanics(t, func() { NewPubKey(randBytes(t, 33)) })
+ b := randBytes(t, 33)
+ p := NewPubKey(b)
+ assert.True(t, bytes.Equal(p[:], b))
+}
+
+func TestPubKeyFromHex(t *testing.T) {
+ // Invalid hex
+ assert.Panics(t, func() { MustPubKeyFromHex("") })
+ assert.Panics(t, func() { MustPubKeyFromHex("cascs") })
+ // Invalid hex length
+ p := NewPubKey(randBytes(t, 33))
+ s := hex.EncodeToString(p[:len(p)/2])
+ assert.Panics(t, func() { MustPubKeyFromHex(s) })
+ // Valid
+ s = hex.EncodeToString(p[:])
+ assert.NotPanics(t, func() { MustPubKeyFromHex(s) })
+ assert.Equal(t, p, MustPubKeyFromHex(s))
+}
+
+func TestPubKeyHex(t *testing.T) {
+ b := randBytes(t, 33)
+ p := NewPubKey(b)
+ h := p.Hex()
+ p2 := MustPubKeyFromHex(h)
+ assert.Equal(t, p2, p)
+ assert.Equal(t, p2.Hex(), h)
+}
+
+func TestPubKeyVerify(t *testing.T) {
+ // Random bytes should not be valid, most of the time
+ failed := false
+ for i := 0; i < 10; i++ {
+ b := randBytes(t, 33)
+ if NewPubKey(b).Verify() != nil {
+ failed = true
+ break
+ }
+ }
+ assert.True(t, failed)
+}
+
+func TestPubKeyVerifyNil(t *testing.T) {
+ // Empty public key should not be valid
+ p := PubKey{}
+ assert.NotNil(t, p.Verify())
+}
+
+func TestPubKeyVerifyDefault1(t *testing.T) {
+ // Generated pub key should be valid
+ p, _ := GenerateKeyPair()
+ assert.Nil(t, p.Verify())
+}
+
+func TestPubKeyVerifyDefault2(t *testing.T) {
+ for i := 0; i < 1024; i++ {
+ p, _ := GenerateKeyPair()
+ assert.Nil(t, p.Verify())
+ }
+}
+
+func TestPubKeyToAddressHash(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ h := p.ToAddressHash()
+ // Should be Ripemd160(SHA256(SHA256()))
+ x := sha256.Sum256(p[:])
+ x = sha256.Sum256(x[:])
+ rh := ripemd160.New()
+ rh.Write(x[:])
+ y := rh.Sum(nil)
+ assert.True(t, bytes.Equal(h[:], y))
+}
+
+func TestPubKeyToAddress(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ addr := AddressFromPubKey(p)
+ //func (self Address) Verify(key PubKey) error {
+ err := addr.Verify(p)
+ assert.Nil(t, err)
+ addrStr := addr.String()
+ _, err = DecodeBase58Address(addrStr)
+ //func DecodeBase58Address(addr string) (Address, error) {
+ assert.Nil(t, err)
+}
+
+func TestPubKeyToAddress2(t *testing.T) {
+ for i := 0; i < 1024; i++ {
+ p, _ := GenerateKeyPair()
+ addr := AddressFromPubKey(p)
+ //func (self Address) Verify(key PubKey) error {
+ err := addr.Verify(p)
+ assert.Nil(t, err)
+ addrStr := addr.String()
+ _, err = DecodeBase58Address(addrStr)
+ //func DecodeBase58Address(addr string) (Address, error) {
+ assert.Nil(t, err)
+ }
+}
+
+func TestMustNewSecKey(t *testing.T) {
+ assert.Panics(t, func() { NewSecKey(randBytes(t, 31)) })
+ assert.Panics(t, func() { NewSecKey(randBytes(t, 33)) })
+ assert.Panics(t, func() { NewSecKey(randBytes(t, 34)) })
+ assert.Panics(t, func() { NewSecKey(randBytes(t, 0)) })
+ assert.Panics(t, func() { NewSecKey(randBytes(t, 100)) })
+ assert.NotPanics(t, func() { NewSecKey(randBytes(t, 32)) })
+ b := randBytes(t, 32)
+ p := NewSecKey(b)
+ assert.True(t, bytes.Equal(p[:], b))
+}
+
+func TestMustSecKeyFromHex(t *testing.T) {
+ // Invalid hex
+ assert.Panics(t, func() { MustSecKeyFromHex("") })
+ assert.Panics(t, func() { MustSecKeyFromHex("cascs") })
+ // Invalid hex length
+ p := NewSecKey(randBytes(t, 32))
+ s := hex.EncodeToString(p[:len(p)/2])
+ assert.Panics(t, func() { MustSecKeyFromHex(s) })
+ // Valid
+ s = hex.EncodeToString(p[:])
+ assert.NotPanics(t, func() { MustSecKeyFromHex(s) })
+ assert.Equal(t, p, MustSecKeyFromHex(s))
+}
+
+func TestSecKeyHex(t *testing.T) {
+ b := randBytes(t, 32)
+ p := NewSecKey(b)
+ h := p.Hex()
+ p2 := MustSecKeyFromHex(h)
+ assert.Equal(t, p2, p)
+ assert.Equal(t, p2.Hex(), h)
+}
+
+func TestSecKeyVerify(t *testing.T) {
+ // Empty secret key should not be valid
+ p := SecKey{}
+ assert.NotNil(t, p.Verify())
+
+ // Generated sec key should be valid
+ _, p = GenerateKeyPair()
+ assert.Nil(t, p.Verify())
+
+ // Random bytes are usually valid
+}
+
+func TestECDHonce(t *testing.T) {
+ pub1, sec1 := GenerateKeyPair()
+ pub2, sec2 := GenerateKeyPair()
+
+ buf1 := ECDH(pub2, sec1)
+ buf2 := ECDH(pub1, sec2)
+
+ assert.True(t, bytes.Equal(buf1, buf2))
+}
+
+func TestECDHloop(t *testing.T) {
+ for i := 0; i < 128; i++ {
+ pub1, sec1 := GenerateKeyPair()
+ pub2, sec2 := GenerateKeyPair()
+ buf1 := ECDH(pub2, sec1)
+ buf2 := ECDH(pub1, sec2)
+ assert.True(t, bytes.Equal(buf1, buf2))
+ }
+}
+
+func TestNewSig(t *testing.T) {
+ assert.Panics(t, func() { NewSig(randBytes(t, 64)) })
+ assert.Panics(t, func() { NewSig(randBytes(t, 66)) })
+ assert.Panics(t, func() { NewSig(randBytes(t, 67)) })
+ assert.Panics(t, func() { NewSig(randBytes(t, 0)) })
+ assert.Panics(t, func() { NewSig(randBytes(t, 100)) })
+ assert.NotPanics(t, func() { NewSig(randBytes(t, 65)) })
+ b := randBytes(t, 65)
+ p := NewSig(b)
+ assert.True(t, bytes.Equal(p[:], b))
+}
+
+func TestMustSigFromHex(t *testing.T) {
+ // Invalid hex
+ assert.Panics(t, func() { MustSigFromHex("") })
+ assert.Panics(t, func() { MustSigFromHex("cascs") })
+ // Invalid hex length
+ p := NewSig(randBytes(t, 65))
+ s := hex.EncodeToString(p[:len(p)/2])
+ assert.Panics(t, func() { MustSigFromHex(s) })
+ // Valid
+ s = hex.EncodeToString(p[:])
+ assert.NotPanics(t, func() { MustSigFromHex(s) })
+ assert.Equal(t, p, MustSigFromHex(s))
+}
+
+func TestSigHex(t *testing.T) {
+ b := randBytes(t, 65)
+ p := NewSig(b)
+ h := p.Hex()
+ p2 := MustSigFromHex(h)
+ assert.Equal(t, p2, p)
+ assert.Equal(t, p2.Hex(), h)
+}
+
+func TestChkSig(t *testing.T) {
+ p, s := GenerateKeyPair()
+ assert.Nil(t, p.Verify())
+ assert.Nil(t, s.Verify())
+ a := AddressFromPubKey(p)
+ assert.Nil(t, a.Verify(p))
+ b := randBytes(t, 256)
+ h := SumSHA256(b)
+ sig := SignHash(h, s)
+ assert.Nil(t, ChkSig(a, h, sig))
+ // Empty sig should be invalid
+ assert.NotNil(t, ChkSig(a, h, Sig{}))
+ // Random sigs should not pass
+ for i := 0; i < 100; i++ {
+ assert.NotNil(t, ChkSig(a, h, NewSig(randBytes(t, 65))))
+ }
+ // Sig for one hash does not work for another hash
+ h2 := SumSHA256(randBytes(t, 256))
+ sig2 := SignHash(h2, s)
+ assert.Nil(t, ChkSig(a, h2, sig2))
+ assert.NotNil(t, ChkSig(a, h, sig2))
+ assert.NotNil(t, ChkSig(a, h2, sig))
+
+ // Different secret keys should not create same sig
+ p2, s2 := GenerateKeyPair()
+ a2 := AddressFromPubKey(p2)
+ h = SHA256{}
+ sig = SignHash(h, s)
+ sig2 = SignHash(h, s2)
+ assert.Nil(t, ChkSig(a, h, sig))
+ assert.Nil(t, ChkSig(a2, h, sig2))
+ assert.NotEqual(t, sig, sig2)
+ h = SumSHA256(randBytes(t, 256))
+ sig = SignHash(h, s)
+ sig2 = SignHash(h, s2)
+ assert.Nil(t, ChkSig(a, h, sig))
+ assert.Nil(t, ChkSig(a2, h, sig2))
+ assert.NotEqual(t, sig, sig2)
+
+ // Bad address should be invalid
+ assert.NotNil(t, ChkSig(a, h, sig2))
+ assert.NotNil(t, ChkSig(a2, h, sig))
+}
+
+func TestSignHash(t *testing.T) {
+ p, s := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ h := SumSHA256(randBytes(t, 256))
+ sig := SignHash(h, s)
+ assert.NotEqual(t, sig, Sig{})
+ assert.Nil(t, ChkSig(a, h, sig))
+}
+
+func TestPubKeyFromSecKey(t *testing.T) {
+ p, s := GenerateKeyPair()
+ assert.Equal(t, PubKeyFromSecKey(s), p)
+ assert.Panics(t, func() { PubKeyFromSecKey(SecKey{}) })
+ assert.Panics(t, func() { PubKeyFromSecKey(NewSecKey(randBytes(t, 99))) })
+ assert.Panics(t, func() { PubKeyFromSecKey(NewSecKey(randBytes(t, 31))) })
+}
+
+func TestPubKeyFromSig(t *testing.T) {
+ p, s := GenerateKeyPair()
+ h := SumSHA256(randBytes(t, 256))
+ sig := SignHash(h, s)
+ p2, err := PubKeyFromSig(sig, h)
+ assert.Equal(t, p, p2)
+ assert.Nil(t, err)
+ _, err = PubKeyFromSig(Sig{}, h)
+ assert.NotNil(t, err)
+}
+
+func TestVerifySignature(t *testing.T) {
+ p, s := GenerateKeyPair()
+ h := SumSHA256(randBytes(t, 256))
+ h2 := SumSHA256(randBytes(t, 256))
+ sig := SignHash(h, s)
+ assert.Nil(t, VerifySignature(p, sig, h))
+ assert.NotNil(t, VerifySignature(p, Sig{}, h))
+ assert.NotNil(t, VerifySignature(p, sig, h2))
+ p2, _ := GenerateKeyPair()
+ assert.NotNil(t, VerifySignature(p2, sig, h))
+ assert.NotNil(t, VerifySignature(PubKey{}, sig, h))
+}
+
+func TestGenerateKeyPair(t *testing.T) {
+ p, s := GenerateKeyPair()
+ assert.Nil(t, p.Verify())
+ assert.Nil(t, s.Verify())
+}
+
+func TestGenerateDeterministicKeyPair(t *testing.T) {
+ // TODO -- deterministic key pairs are useless as is because we can't
+ // generate pair n+1, only pair 0
+ seed := randBytes(t, 32)
+ p, s := GenerateDeterministicKeyPair(seed)
+ assert.Nil(t, p.Verify())
+ assert.Nil(t, s.Verify())
+ p, s = GenerateDeterministicKeyPair(seed)
+ assert.Nil(t, p.Verify())
+ assert.Nil(t, s.Verify())
+}
+
+func TestSecKeTest(t *testing.T) {
+ _, s := GenerateKeyPair()
+ assert.Nil(t, TestSecKey(s))
+ assert.NotNil(t, TestSecKey(SecKey{}))
+}
+
+func TestSecKeyHashTest(t *testing.T) {
+ _, s := GenerateKeyPair()
+ h := SumSHA256(randBytes(t, 256))
+ assert.Nil(t, TestSecKeyHash(s, h))
+ assert.NotNil(t, TestSecKeyHash(SecKey{}, h))
+}
+
+func TestGenerateDeterministicKeyPairsUsesAllBytes(t *testing.T) {
+ // Tests that if a seed >128 bits is used, the generator does not ignore bits >128
+ seed := "property diet little foster provide disagree witness mountain alley weekend kitten general"
+ seckeys := GenerateDeterministicKeyPairs([]byte(seed), 3)
+ seckeys2 := GenerateDeterministicKeyPairs([]byte(seed[:16]), 3)
+ require.NotEqual(t, seckeys, seckeys2)
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go
new file mode 100644
index 0000000..901ef9b
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go
@@ -0,0 +1,894 @@
+package encoder
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "log"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher"
+)
+
+func randBytes(n int) []byte { // nolint: unparam
+ const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ var bytes = make([]byte, n)
+ rand.Read(bytes)
+ for i, b := range bytes {
+ bytes[i] = alphanum[b%byte(len(alphanum))]
+ }
+ return bytes
+}
+
+/*
+* the file name has to end with _test.go to be picked up as a set of tests by go test
+* the package name has to be the same as in the source file that has to be tested
+* you have to import the package testing
+* all test functions should start with Test to be run as a test
+* the tests will be executed in the same order that they appear in the source
+* the test function TestXxx functions take a pointer to the type testing.T. You use it to record the test status and also for logging.
+* the signature of the test function should always be func TestXxx ( *testing.T). You can have any combination of alphanumeric characters and the hyphen for the Xxx part, the only constraint is that it should not begin with a lowercase letter, [a-z].
+* a call to any of the following functions of testing.T within the test code Error, Errorf, FailNow, Fatal, Fatalf will indicate to go test that the test has failed.
+ */
+
+//Size of= 13
+type TestStruct struct {
+ X int32
+ Y int64
+ Z uint8
+ K []byte
+ W bool
+ T string
+ U cipher.PubKey
+}
+
+type TestStruct2 struct {
+ X int32
+ Y int64
+ Z uint8
+ K [8]byte
+ W bool
+}
+
+type TestStructIgnore struct {
+ X int32
+ Y int64
+ Z uint8 `enc:"-"`
+ K []byte
+}
+
+type TestStructWithoutIgnore struct {
+ X int32
+ Y int64
+ K []byte
+}
+
+//func (*B) Fatal
+
+func Test_Encode_1(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+ var t TestStruct
+ t.X = 345535
+ t.Y = 23432435443
+ t.Z = 255
+ t.K = []byte("TEST6")
+ t.W = true
+ t.T = "hello"
+ t.U = cipher.PubKey{1, 2, 3, 0, 5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+ b := Serialize(t)
+
+ var buf bytes.Buffer
+ buf.Write(b)
+
+ var t2 TestStruct
+ err := Deserialize(&buf, len(b), &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ b2 := Serialize(t2)
+
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+func Test_Encode_2a(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+ var t TestStruct2
+ t.X = 345535
+ t.Y = 23432435443
+ t.Z = 255
+ t.W = false
+ _tt := []byte("ASDSADFSDFASDFSD")
+ for i := 0; i < 8; i++ {
+ t.K[i] = _tt[i]
+ }
+
+ b := Serialize(t)
+
+ var buf bytes.Buffer
+ buf.Write(b)
+
+ var t2 TestStruct2
+ err := Deserialize(&buf, len(b), &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ b2 := Serialize(t2)
+
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+func Test_Encode_2b(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+ var t TestStruct2
+ t.X = 345535
+ t.Y = 23432435443
+ t.Z = 255
+ _tt := []byte("ASDSADFSDFASDFSD")
+ for i := 0; i < 8; i++ {
+ t.K[i] = _tt[i]
+ }
+
+ b := Serialize(t)
+
+ var t2 TestStruct2
+ err := DeserializeRaw(b, &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ b2 := Serialize(t2)
+
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+type TestStruct3 struct {
+ X int32
+ K []byte
+}
+
+func Test_Encode_3a(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+ var t1 TestStruct3
+ t1.X = 345535
+ t1.K = randBytes(32)
+
+ b := Serialize(t1)
+
+ var buf bytes.Buffer
+ buf.Write(b)
+
+ var t2 TestStruct3
+ err := Deserialize(&buf, len(b), &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ if t1.X != t2.X || len(t1.K) != len(t2.K) || bytes.Compare(t1.K, t2.K) != 0 {
+ T.Fatal()
+ }
+
+ b2 := Serialize(t2)
+
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+func Test_Encode_3b(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+ var t1 TestStruct3
+ t1.X = 345535
+ t1.K = randBytes(32)
+
+ b := Serialize(t1)
+
+ var t2 TestStruct3
+ err := DeserializeRaw(b, &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ if t1.X != t2.X || len(t1.K) != len(t2.K) || bytes.Compare(t1.K, t2.K) != 0 {
+ T.Fatal()
+ }
+
+ b2 := Serialize(t2)
+
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+type TestStruct4 struct {
+ X int32
+ Y int32
+}
+
+type TestStruct5 struct {
+ X int32
+ A []TestStruct4
+}
+
+func Test_Encode_4(T *testing.T) {
+ var t1 TestStruct5
+ t1.X = 345535
+
+ const NUM = 8
+ t1.A = make([]TestStruct4, NUM)
+
+ b := Serialize(t1)
+
+ var t2 TestStruct5
+ err := DeserializeRaw(b, &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ if t1.X != t2.X {
+ T.Fatal("TestStruct5.X not equal")
+ }
+
+ if len(t1.A) != len(t2.A) {
+ T.Fatal("Slice lengths not equal")
+ }
+
+ for i, ts := range t1.A {
+ if ts != t2.A[i] {
+ T.Fatal("Slice values not equal")
+ }
+ }
+
+ b2 := Serialize(t2)
+
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+// type TestStruct2 struct {
+// X int32
+// Y int64
+// Z uint8
+// K [8]byte
+// }
+
+func Test_Encode_5(T *testing.T) {
+
+ var ts TestStruct2
+ ts.X = 345535
+ ts.Y = 23432435443
+ ts.Z = 255
+
+ b1 := Serialize(ts)
+
+ var t = reflect.TypeOf(ts)
+ var v = reflect.New(t) //pointer to type t
+
+ //New returns a Value representing a pointer to a new zero value for the specified type.
+ //That is, the returned Value's Type is PtrTo(t).
+
+ _, err := DeserializeRawToValue(b1, v)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ v = reflect.Indirect(v)
+ if v.FieldByName("X").Int() != int64(ts.X) {
+ T.Fatalf("X not equal")
+ }
+ if v.FieldByName("Y").Int() != ts.Y {
+ T.Fatalf("Y not equal")
+ }
+ if v.FieldByName("Z").Uint() != uint64(ts.Z) {
+ T.Fatalf("Z not equal")
+ }
+}
+
+func Test_Encode_IgnoreTagSerialize(T *testing.T) {
+ var t TestStructIgnore
+ t.X = 345535
+ t.Y = 23432435443
+ t.Z = 255
+ t.K = []byte("TEST6")
+
+ b := Serialize(t)
+ var buf bytes.Buffer
+ buf.Write(b)
+
+ var t2 TestStructIgnore
+ t.X = 0
+ t.Y = 0
+ t.Z = 0
+ t.K = []byte("")
+ err := Deserialize(&buf, len(b), &t2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ if t2.Z != 0 {
+ T.Fatalf("Z should not deserialize. It is %d", t2.Z)
+ }
+
+ buf.Reset()
+ buf.Write(b)
+
+ var t3 TestStructWithoutIgnore
+ err = Deserialize(&buf, len(b), &t3)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ b2 := Serialize(t2)
+ if bytes.Compare(b, b2) != 0 {
+ T.Fatal()
+ }
+}
+
+type Contained struct {
+ X uint32
+ Y uint64
+ Bytes []uint8
+ Ints []uint16
+}
+
+type Container struct {
+ Elements []Contained
+}
+
+func TestEncodeNestedSlice(t *testing.T) {
+ size := 0
+ elems := make([]Contained, 4)
+ for i := range elems {
+ elems[i].X = uint32(i)
+ size += 4
+ elems[i].Y = uint64(i)
+ size += 8
+ elems[i].Bytes = make([]uint8, i)
+ for j := range elems[i].Bytes {
+ elems[i].Bytes[j] = uint8(j)
+ }
+ size += 4 + i*1
+ elems[i].Ints = make([]uint16, i)
+ for j := range elems[i].Ints {
+ elems[i].Ints[j] = uint16(j)
+ }
+ size += 4 + i*2
+ }
+ c := Container{elems}
+ n, err := datasizeWrite(reflect.ValueOf(c))
+ if err != nil {
+ t.Fatalf("datasizeWrite failed: %v", err)
+ }
+ if n != size+4 {
+ t.Fatal("Wrong data size")
+ }
+ b := Serialize(c)
+ d := Container{}
+ err = DeserializeRaw(b, &d)
+ if err != nil {
+ t.Fatalf("DeserializeRaw failed: %v", err)
+ }
+ for i, e := range d.Elements {
+ if c.Elements[i].X != e.X || c.Elements[i].Y != e.Y {
+ t.Fatalf("Deserialized x, y to invalid value. "+
+ "Expected %d,%d but got %d,%d", c.Elements[i].X,
+ c.Elements[i].Y, e.X, e.Y)
+ }
+ if len(c.Elements[i].Bytes) != len(e.Bytes) {
+ t.Fatal("Deserialized Bytes to invalid length")
+ }
+ for j, b := range c.Elements[i].Bytes {
+ if c.Elements[i].Bytes[j] != b {
+ t.Fatal("Deserialized to invalid value")
+ }
+ }
+ if len(c.Elements[i].Ints) != len(e.Ints) {
+ t.Fatal("Deserialized Ints to invalid length")
+ }
+ for j, b := range c.Elements[i].Ints {
+ if c.Elements[i].Ints[j] != b {
+ t.Fatal("Deserialized Ints to invalid value")
+ }
+ }
+ }
+}
+
+type Array struct {
+ Arr []int
+}
+
+func TestDecodeNotEnoughLength(t *testing.T) {
+ b := make([]byte, 2)
+ var d Array
+ err := DeserializeRaw(b, &d)
+ if err == nil {
+ t.Fatal("Expected error")
+ } else if err.Error() != "Deserialization failed" {
+ t.Fatalf("Expected different error, but got %s", err.Error())
+ }
+
+ // Test with slice
+ thing := make([]int, 3)
+ err = DeserializeRaw(b, thing)
+ if err == nil {
+ t.Fatal("Expected error")
+ } else if err.Error() != "Deserialization failed" {
+ t.Fatal("Expected different error")
+ }
+}
+
+func TestFlattenMultidimensionalBytes(t *testing.T) {
+ var data [16][16]byte
+ for i := 0; i < 16; i++ {
+ for j := 0; j < 16; j++ {
+ data[i][j] = byte(i * j)
+ }
+ }
+
+ b := Serialize(data)
+ expect := 16 * 16
+ if len(b) != expect {
+ t.Fatalf("Expected %d bytes, decoded to %d bytes", expect, len(b))
+ }
+
+}
+
+func TestMultiArrays(T *testing.T) {
+ var data [16][16]byte
+ for i := 0; i < 16; i++ {
+ for j := 0; j < 16; j++ {
+ data[i][j] = byte(i * j)
+ }
+ }
+
+ b := Serialize(data)
+
+ var data2 [16][16]byte
+
+ err := DeserializeRaw(b, &data2)
+ if err != nil {
+ T.Fatal(err)
+ }
+
+ for i := 0; i < 16; i++ {
+ for j := 0; j < 16; j++ {
+ if data[i][j] != data2[i][j] {
+ T.Fatalf("failed round trip test")
+ }
+ }
+ }
+
+ b2 := Serialize(data2)
+ if !bytes.Equal(b, b2) {
+ T.Fatalf("Failed round trip test")
+ }
+
+ if len(b) != 256 {
+ T.Fatalf("decoded to wrong byte length")
+ }
+
+}
+
+func TestSerializeAtomic(t *testing.T) {
+
+ var sp uint64 = 0x000C8A9E1809F720
+ b := SerializeAtomic(sp)
+
+ var i uint64
+ DeserializeAtomic(b, &i)
+
+ if i != sp {
+ t.Fatal("round trip atomic fail")
+ }
+}
+
+func TestPushPop(t *testing.T) {
+ var sp uint64 = 0x000C8A9E1809F720
+
+ var d [8]byte
+ EncodeInt(d[0:8], sp)
+
+ //fmt.Printf("d= %X \n", d[:])
+
+ var ti uint64
+ DecodeInt(d[0:8], &ti)
+
+ if ti != sp {
+ //fmt.Printf("sp= %X ti= %X \n", sp,ti)
+ t.Error("roundtrip failed")
+ }
+}
+
+type TestStruct5a struct {
+ Test uint64
+}
+
+func TestPanicTest(t *testing.T) {
+
+ defer func() {
+ if r := recover(); r == nil {
+ t.Error("EncodeInt Did not panic")
+ }
+ }()
+
+ log.Panic()
+}
+
+func TestPushPopNegative(t *testing.T) {
+
+ defer func() {
+ if r := recover(); r == nil {
+ t.Error("EncodeInt Did not panic on invalid input type")
+ }
+ }()
+
+ var tst TestStruct5a
+ //var sp uint64 = 0x000C8A9E1809F720
+ var d [8]byte
+ EncodeInt(d[0:8], &tst) //attemp to encode invalid type
+
+}
+
+func TestByteArray(t *testing.T) {
+
+ tstr := "7105a46cb4c2810f0c916e0bb4b4e4ef834ad42040c471b42c96d356a9fd1b21"
+
+ d, err := hex.DecodeString(tstr)
+ if err != nil {
+ t.Fail()
+ }
+
+ buff := Serialize(d)
+ var buff2 [32]byte
+ copy(buff2[0:32], buff[0:32])
+
+ if len(buff2) != 32 {
+ t.Errorf("incorrect serialization length for fixed sized arrays: %d byte fixed sized array serialized to %d bytes \n", len(d), len(buff2))
+ }
+
+}
+
+func TestEncodeDictInt2Int(t *testing.T) {
+ m1 := map[uint8]uint64{0: 0, 1: 1, 2: 2}
+ buff := Serialize(m1)
+ if len(buff) != 4 /* Length */ +(1+8)*len(m1) /* 1b key + 8b value per entry */ {
+ t.Fail()
+ }
+ m2 := make(map[uint8]uint64)
+ if DeserializeRaw(buff, m2) != nil {
+ t.Fail()
+ }
+ if len(m1) != len(m2) {
+ t.Errorf("Expected length %d but got %d", len(m1), len(m2))
+ }
+ for key := range m1 {
+ if m1[key] != m2[key] {
+ t.Errorf("Expected value %d for key %d but got %d", m1[key], key, m2[key])
+ }
+ }
+}
+
+type TestStructWithDict struct {
+ X int32
+ Y int64
+ M map[uint8]TestStruct
+ K []byte
+}
+
+func TestEncodeDictNested(t *testing.T) {
+ s1 := TestStructWithDict{
+ 0x01234567,
+ 0x0123456789ABCDEF,
+ map[uint8]TestStruct{
+ 0x01: TestStruct{
+ 0x01234567,
+ 0x0123456789ABCDEF,
+ 0x01,
+ []byte{0, 1, 2},
+ true,
+ "ab",
+ cipher.PubKey{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ },
+ },
+ 0x23: TestStruct{
+ 0x01234567,
+ 0x0123456789ABCDEF,
+ 0x01,
+ []byte{0, 1, 2},
+ true,
+ "cd",
+ cipher.PubKey{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ },
+ },
+ },
+ []byte{0, 1, 2, 3, 4},
+ }
+ buff := Serialize(s1)
+ if len(buff) == 0 {
+ t.Fail()
+ }
+
+ s2 := TestStructWithDict{}
+ if DeserializeRaw(buff, &s2) != nil {
+ t.Fail()
+ }
+ if !reflect.DeepEqual(s1, s2) {
+ t.Errorf("Expected %v but got %v", s1, s2)
+ }
+}
+
+func TestEncodeDictString2Int64(t *testing.T) {
+ v := map[string]int64{
+ "foo": 1,
+ "bar": 2,
+ }
+
+ b := Serialize(v)
+
+ v2 := make(map[string]int64)
+ err := DeserializeRaw(b, &v2)
+ require.NoError(t, err)
+
+ require.Equal(t, v, v2)
+}
+
+func TestOmitEmptyString(t *testing.T) {
+
+ type omitString struct {
+ A string `enc:"a,omitempty"`
+ }
+
+ cases := []struct {
+ name string
+ input omitString
+ outputShouldBeEmpty bool
+ }{
+ {
+ name: "string not empty",
+ input: omitString{
+ A: "foo",
+ },
+ },
+
+ {
+ name: "string empty",
+ input: omitString{},
+ outputShouldBeEmpty: true,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ b := Serialize(tc.input)
+
+ if tc.outputShouldBeEmpty {
+ require.Empty(t, b)
+ } else {
+ require.NotEmpty(t, b)
+ }
+
+ var y omitString
+ err := DeserializeRaw(b, &y)
+ require.NoError(t, err)
+
+ require.Equal(t, tc.input, y)
+ })
+ }
+
+}
+
+func TestOmitEmptySlice(t *testing.T) {
+ type omitSlice struct {
+ B []byte `enc:"b,omitempty"`
+ }
+
+ cases := []struct {
+ name string
+ input omitSlice
+ expect *omitSlice
+ outputShouldBeEmpty bool
+ }{
+ {
+ name: "slice not empty",
+ input: omitSlice{
+ B: []byte("foo"),
+ },
+ },
+
+ {
+ name: "slice nil",
+ input: omitSlice{},
+ outputShouldBeEmpty: true,
+ },
+
+ {
+ name: "slice empty but not nil",
+ input: omitSlice{
+ B: []byte{},
+ },
+ expect: &omitSlice{},
+ outputShouldBeEmpty: true,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ b := Serialize(tc.input)
+
+ if tc.outputShouldBeEmpty {
+ require.Empty(t, b)
+ } else {
+ require.NotEmpty(t, b)
+ }
+
+ var y omitSlice
+ err := DeserializeRaw(b, &y)
+ require.NoError(t, err)
+
+ expect := tc.expect
+ if expect == nil {
+ expect = &tc.input
+ }
+
+ require.Equal(t, *expect, y)
+ })
+ }
+}
+
+func TestOmitEmptyMap(t *testing.T) {
+
+ type omitMap struct {
+ C map[string]int64 `enc:"d,omitempty"`
+ }
+
+ cases := []struct {
+ name string
+ input omitMap
+ expect *omitMap
+ outputShouldBeEmpty bool
+ }{
+ {
+ name: "map not empty",
+ input: omitMap{
+ C: map[string]int64{"foo": 1},
+ },
+ },
+
+ {
+ name: "map nil",
+ input: omitMap{},
+ outputShouldBeEmpty: true,
+ },
+
+ {
+ name: "map empty but not nil",
+ input: omitMap{
+ C: map[string]int64{},
+ },
+ expect: &omitMap{},
+ outputShouldBeEmpty: true,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ b := Serialize(tc.input)
+
+ if tc.outputShouldBeEmpty {
+ require.Empty(t, b)
+ } else {
+ require.NotEmpty(t, b)
+ }
+
+ var y omitMap
+ err := DeserializeRaw(b, &y)
+ require.NoError(t, err)
+
+ expect := tc.expect
+ if expect == nil {
+ expect = &tc.input
+ }
+
+ require.Equal(t, *expect, y)
+ })
+ }
+}
+
+func TestOmitEmptyMixedFinalByte(t *testing.T) {
+ type omitMixed struct {
+ A string
+ B []byte `enc:",omitempty"`
+ }
+
+ cases := []struct {
+ name string
+ input omitMixed
+ expect omitMixed
+ }{
+ {
+ name: "none empty",
+ input: omitMixed{
+ A: "foo",
+ B: []byte("foo"),
+ },
+ expect: omitMixed{
+ A: "foo",
+ B: []byte("foo"),
+ },
+ },
+
+ {
+ name: "byte nil",
+ input: omitMixed{
+ A: "foo",
+ },
+ expect: omitMixed{
+ A: "foo",
+ },
+ },
+
+ {
+ name: "byte empty but not nil",
+ input: omitMixed{
+ A: "foo",
+ B: []byte{},
+ },
+ expect: omitMixed{
+ A: "foo",
+ },
+ },
+
+ {
+ name: "first string empty but not omitted",
+ input: omitMixed{
+ B: []byte("foo"),
+ },
+ expect: omitMixed{
+ B: []byte("foo"),
+ },
+ },
+
+ {
+ name: "all empty",
+ input: omitMixed{},
+ expect: omitMixed{},
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ b := Serialize(tc.input)
+ require.NotEmpty(t, b)
+
+ var y omitMixed
+ err := DeserializeRaw(b, &y)
+ require.NoError(t, err)
+
+ require.Equal(t, tc.expect, y)
+ })
+ }
+}
+
+func TestOmitEmptyFinalFieldOnly(t *testing.T) {
+ type bad struct {
+ A string
+ B string `enc:",omitempty"`
+ C string
+ }
+
+ require.Panics(t, func() {
+ var b bad
+ Serialize(b)
+ })
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go
new file mode 100644
index 0000000..d57be0f
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go
@@ -0,0 +1,271 @@
+package cipher
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher/ripemd160"
+)
+
+func freshSumRipemd160(b []byte) Ripemd160 {
+ sh := ripemd160.New()
+ sh.Write(b)
+ h := Ripemd160{}
+ h.Set(sh.Sum(nil))
+ return h
+}
+
+func freshSumSHA256(b []byte) SHA256 {
+ sh := sha256.New()
+ sh.Write(b)
+ h := SHA256{}
+ h.Set(sh.Sum(nil))
+ return h
+}
+
+func randBytes(t *testing.T, n int) []byte {
+ b := make([]byte, n)
+ x, err := rand.Read(b)
+ assert.Equal(t, n, x)
+ assert.Nil(t, err)
+ return b
+}
+
+func TestHashRipemd160(t *testing.T) {
+ assert.NotPanics(t, func() { HashRipemd160(randBytes(t, 128)) })
+ r := HashRipemd160(randBytes(t, 160))
+ assert.NotEqual(t, r, Ripemd160{})
+ // 2nd hash should not be affected by previous
+ b := randBytes(t, 256)
+ r2 := HashRipemd160(b)
+ assert.NotEqual(t, r2, Ripemd160{})
+ assert.Equal(t, r2, freshSumRipemd160(b))
+}
+
+func TestRipemd160Set(t *testing.T) {
+ h := Ripemd160{}
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 21))
+ })
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 100))
+ })
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 19))
+ })
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 0))
+ })
+ assert.NotPanics(t, func() {
+ h.Set(randBytes(t, 20))
+ })
+ b := randBytes(t, 20)
+ h.Set(b)
+ assert.True(t, bytes.Equal(h[:], b))
+}
+
+func TestSHA256Set(t *testing.T) {
+ h := SHA256{}
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 33))
+ })
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 100))
+ })
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 31))
+ })
+ assert.Panics(t, func() {
+ h.Set(randBytes(t, 0))
+ })
+ assert.NotPanics(t, func() {
+ h.Set(randBytes(t, 32))
+ })
+ b := randBytes(t, 32)
+ h.Set(b)
+ assert.True(t, bytes.Equal(h[:], b))
+}
+
+func TestSHA256Hex(t *testing.T) {
+ h := SHA256{}
+ h.Set(randBytes(t, 32))
+ s := h.Hex()
+ h2, err := SHA256FromHex(s)
+ assert.Nil(t, err)
+ assert.Equal(t, h, h2)
+ assert.Equal(t, h2.Hex(), s)
+}
+
+func TestSHA256KnownValue(t *testing.T) {
+ vals := []struct {
+ input string
+ output string
+ }{
+ // These values are generated by
+ // echo -n input | sha256sum
+ {
+ "skycoin",
+ "5a42c0643bdb465d90bf673b99c14f5fa02db71513249d904573d2b8b63d353d",
+ },
+ {
+ "hello world",
+ "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9",
+ },
+ {
+ "hello world asd awd awd awdapodawpokawpod ",
+ "99d71f95cafe05ea2dddebc35b6083bd5af0e44850c9dc5139b4476c99950be4",
+ },
+ }
+ for _, io := range vals {
+ assert.Equal(t, io.output, SumSHA256([]byte(io.input)).Hex())
+ }
+}
+
+func TestSumSHA256(t *testing.T) {
+ b := randBytes(t, 256)
+ h1 := SumSHA256(b)
+ assert.NotEqual(t, h1, SHA256{})
+ // A second call to Sum should not be influenced by the original
+ c := randBytes(t, 256)
+ h2 := SumSHA256(c)
+ assert.NotEqual(t, h2, SHA256{})
+ assert.Equal(t, h2, freshSumSHA256(c))
+}
+
+func TestSHA256FromHex(t *testing.T) {
+ // Invalid hex hash
+ _, err := SHA256FromHex("cawcd")
+ assert.NotNil(t, err)
+
+ // Truncated hex hash
+ h := SumSHA256(randBytes(t, 128))
+ _, err = SHA256FromHex(hex.EncodeToString(h[:len(h)/2]))
+ assert.NotNil(t, err)
+
+ // Valid hex hash
+ h2, err := SHA256FromHex(hex.EncodeToString(h[:]))
+ assert.Equal(t, h, h2)
+ assert.Nil(t, err)
+}
+
+func TestMustSHA256FromHex(t *testing.T) {
+ // Invalid hex hash
+ assert.Panics(t, func() { MustSHA256FromHex("cawcd") })
+
+ // Truncated hex hash
+ h := SumSHA256(randBytes(t, 128))
+ assert.Panics(t, func() {
+ MustSHA256FromHex(hex.EncodeToString(h[:len(h)/2]))
+ })
+
+ // Valid hex hash
+ h2 := MustSHA256FromHex(hex.EncodeToString(h[:]))
+ assert.Equal(t, h, h2)
+}
+
+func TestMustSumSHA256(t *testing.T) {
+ b := randBytes(t, 128)
+ assert.Panics(t, func() { MustSumSHA256(b, 127) })
+ assert.Panics(t, func() { MustSumSHA256(b, 129) })
+ assert.NotPanics(t, func() { MustSumSHA256(b, 128) })
+ h := MustSumSHA256(b, 128)
+ assert.NotEqual(t, h, SHA256{})
+ assert.Equal(t, h, freshSumSHA256(b))
+}
+
+func TestDoubleSHA256(t *testing.T) {
+ b := randBytes(t, 128)
+ h := DoubleSHA256(b)
+ assert.NotEqual(t, h, SHA256{})
+ assert.NotEqual(t, h, freshSumSHA256(b))
+}
+
+func TestAddSHA256(t *testing.T) {
+ b := randBytes(t, 128)
+ h := SumSHA256(b)
+ c := randBytes(t, 64)
+ i := SumSHA256(c)
+ add := AddSHA256(h, i)
+ assert.NotEqual(t, add, SHA256{})
+ assert.NotEqual(t, add, h)
+ assert.NotEqual(t, add, i)
+ assert.Equal(t, add, SumSHA256(append(h[:], i[:]...)))
+}
+
+func TestXorSHA256(t *testing.T) {
+ b := randBytes(t, 128)
+ c := randBytes(t, 128)
+ h := SumSHA256(b)
+ i := SumSHA256(c)
+ assert.NotEqual(t, h.Xor(i), h)
+ assert.NotEqual(t, h.Xor(i), i)
+ assert.NotEqual(t, h.Xor(i), SHA256{})
+ assert.Equal(t, h.Xor(i), i.Xor(h))
+}
+
+func TestSHA256Null(t *testing.T) {
+ var x SHA256
+ require.True(t, x.Null())
+
+ b := randBytes(t, 128)
+ x = SumSHA256(b)
+
+ require.False(t, x.Null())
+}
+
+func TestNextPowerOfTwo(t *testing.T) {
+ inputs := [][]uint64{
+ {0, 1},
+ {1, 1},
+ {2, 2},
+ {3, 4},
+ {4, 4},
+ {5, 8},
+ {8, 8},
+ {14, 16},
+ {16, 16},
+ {17, 32},
+ {43345, 65536},
+ {65535, 65536},
+ {35657, 65536},
+ {65536, 65536},
+ {65537, 131072},
+ }
+ for _, i := range inputs {
+ assert.Equal(t, nextPowerOfTwo(i[0]), i[1])
+ }
+ for i := uint64(2); i < 10000; i++ {
+ p := nextPowerOfTwo(i)
+ assert.Equal(t, p%2, uint64(0))
+ assert.True(t, p >= i)
+ }
+}
+
+func TestMerkle(t *testing.T) {
+ h := SumSHA256(randBytes(t, 128))
+ // Single hash input returns hash
+ assert.Equal(t, Merkle([]SHA256{h}), h)
+ h2 := SumSHA256(randBytes(t, 128))
+ // 2 hashes should be AddSHA256 of them
+ assert.Equal(t, Merkle([]SHA256{h, h2}), AddSHA256(h, h2))
+ // 3 hashes should be Add(Add())
+ h3 := SumSHA256(randBytes(t, 128))
+ out := AddSHA256(AddSHA256(h, h2), AddSHA256(h3, SHA256{}))
+ assert.Equal(t, Merkle([]SHA256{h, h2, h3}), out)
+ // 4 hashes should be Add(Add())
+ h4 := SumSHA256(randBytes(t, 128))
+ out = AddSHA256(AddSHA256(h, h2), AddSHA256(h3, h4))
+ assert.Equal(t, Merkle([]SHA256{h, h2, h3, h4}), out)
+ // 5 hashes
+ h5 := SumSHA256(randBytes(t, 128))
+ out = AddSHA256(AddSHA256(h, h2), AddSHA256(h3, h4))
+ out = AddSHA256(out, AddSHA256(AddSHA256(h5, SHA256{}),
+ AddSHA256(SHA256{}, SHA256{})))
+ assert.Equal(t, Merkle([]SHA256{h, h2, h3, h4, h5}), out)
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go
new file mode 100644
index 0000000..d441d9d
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go
@@ -0,0 +1,665 @@
+package secp256k1
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "log"
+ "math/rand"
+ "testing"
+)
+
+const TESTS = 1 //10000 // how many tests
+const SigSize = 65 //64+1
+
+func Test_Secp256_00(t *testing.T) {
+
+ nonce := RandByte(32) //going to get bitcoins stolen!
+
+ if len(nonce) != 32 {
+ t.Fatal()
+ }
+
+}
+
+//test agreement for highest bit test
+func Test_BitTwiddle(t *testing.T) {
+ var b byte
+ for i := 0; i < 512; i++ {
+ bool1 := ((b >> 7) == 1)
+ bool2 := ((b & 0x80) == 0x80)
+ if bool1 != bool2 {
+ t.Fatal()
+ }
+ b++
+ }
+}
+
+//tests for Malleability
+//highest bit of S must be 0; 32nd byte
+func CompactSigTest(sig []byte) {
+ b := int(sig[32])
+ if b < 0 {
+ log.Panic()
+ }
+ if ((b >> 7) == 1) != ((b & 0x80) == 0x80) {
+ log.Panicf("b= %v b2= %v \n", b, b>>7)
+ }
+ if (b & 0x80) == 0x80 {
+ log.Panicf("b= %v b2= %v \n", b, b&0x80)
+ }
+}
+
+//test pubkey/private generation
+func Test_Secp256_01(t *testing.T) {
+ pubkey, seckey := GenerateKeyPair()
+ if VerifySeckey(seckey) != 1 {
+ t.Fatal()
+ }
+ if VerifyPubkey(pubkey) != 1 {
+ t.Fatal()
+ }
+}
+
+// test compressed pubkey from private key
+func Test_PubkeyFromSeckey(t *testing.T) {
+ // http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html
+ privkey, _ := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)
+ desiredPubKey, _ := hex.DecodeString(`03fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef1`)
+ if pubkey := PubkeyFromSeckey(privkey); pubkey == nil {
+ t.Fatal()
+ } else if !bytes.Equal(pubkey, desiredPubKey) {
+ t.Fatal()
+ }
+}
+
+// test uncompressed pubkey from private key
+func Test_UncompressedPubkeyFromSeckey(t *testing.T) {
+ // http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html
+ privkey, _ := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)
+ desiredPubKey, _ := hex.DecodeString(`04fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef10fe85eb3ce193405c2dd8453b7aeb6c1752361efdbf4f52ea8bf8f304aab37ab`)
+ if pubkey := UncompressedPubkeyFromSeckey(privkey); pubkey == nil {
+ t.Fatal()
+ } else if !bytes.Equal(pubkey, desiredPubKey) {
+ t.Fatal()
+ }
+}
+
+//returns random pubkey, seckey, hash and signature
+func RandX() ([]byte, []byte, []byte, []byte) {
+ pubkey, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+ return pubkey, seckey, msg, sig
+}
+
+func Test_SignatureVerifyPubkey(t *testing.T) {
+ pubkey1, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+ if VerifyPubkey(pubkey1) == 0 {
+ t.Fail()
+ }
+ pubkey2 := RecoverPubkey(msg, sig)
+ if bytes.Equal(pubkey1, pubkey2) == false {
+ t.Fatal("Recovered pubkey does not match")
+ }
+}
+
+func Test_verify_functions(t *testing.T) {
+ pubkey, seckey, hash, sig := RandX()
+ if VerifySeckey(seckey) == 0 {
+ t.Fail()
+ }
+ if VerifyPubkey(pubkey) == 0 {
+ t.Fail()
+ }
+ if VerifySignature(hash, sig, pubkey) == 0 {
+ t.Fail()
+ }
+ _ = sig
+}
+
+func Test_SignatureVerifySecKey(t *testing.T) {
+ pubkey, seckey := GenerateKeyPair()
+ if VerifySeckey(seckey) == 0 {
+ t.Fail()
+ }
+ if VerifyPubkey(pubkey) == 0 {
+ t.Fail()
+ }
+}
+
+//test size of messages
+func Test_Secp256_02s(t *testing.T) {
+ pubkey, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+ CompactSigTest(sig)
+ if sig == nil {
+ t.Fatal("Signature nil")
+ }
+ if len(pubkey) != 33 {
+ t.Fail()
+ }
+ if len(seckey) != 32 {
+ t.Fail()
+ }
+ if len(sig) != 64+1 {
+ t.Fail()
+ }
+ if int(sig[64]) > 4 {
+ t.Fail()
+ } //should be 0 to 4
+}
+
+//test signing message
+func Test_Secp256_02(t *testing.T) {
+ pubkey1, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+ if sig == nil {
+ t.Fatal("Signature nil")
+ }
+
+ pubkey2 := RecoverPubkey(msg, sig)
+ if pubkey2 == nil {
+ t.Fatal("Recovered pubkey invalid")
+ }
+ if bytes.Equal(pubkey1, pubkey2) == false {
+ t.Fatal("Recovered pubkey does not match")
+ }
+
+ ret := VerifySignature(msg, sig, pubkey1)
+ if ret != 1 {
+ t.Fatal("Signature invalid")
+ }
+}
+
+//test pubkey recovery
+func Test_Secp256_02a(t *testing.T) {
+ pubkey1, seckey1 := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey1)
+
+ if sig == nil {
+ t.Fatal("Signature nil")
+ }
+ ret := VerifySignature(msg, sig, pubkey1)
+ if ret != 1 {
+ t.Fatal("Signature invalid")
+ }
+
+ pubkey2 := RecoverPubkey(msg, sig)
+ if len(pubkey1) != len(pubkey2) {
+ t.Fatal()
+ }
+ for i := range pubkey1 {
+ if pubkey1[i] != pubkey2[i] {
+ t.Fatal()
+ }
+ }
+ if bytes.Equal(pubkey1, pubkey2) == false {
+ t.Fatal()
+ }
+}
+
+//test random messages for the same pub/private key
+func Test_Secp256_03(t *testing.T) {
+ _, seckey := GenerateKeyPair()
+ for i := 0; i < TESTS; i++ {
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+ CompactSigTest(sig)
+
+ sig[len(sig)-1] %= 4
+ pubkey2 := RecoverPubkey(msg, sig)
+ if pubkey2 == nil {
+ t.Fail()
+ }
+ }
+}
+
+//test random messages for different pub/private keys
+func Test_Secp256_04(t *testing.T) {
+ for i := 0; i < TESTS; i++ {
+ pubkey1, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+ CompactSigTest(sig)
+
+ if sig[len(sig)-1] >= 4 {
+ t.Fail()
+ }
+ pubkey2 := RecoverPubkey(msg, sig)
+ if pubkey2 == nil {
+ t.Fail()
+ }
+ if bytes.Equal(pubkey1, pubkey2) == false {
+ t.Fail()
+ }
+ }
+}
+
+//test random signatures against fixed messages; should fail
+
+//crashes:
+// -SIPA look at this
+
+func randSig() []byte {
+ sig := RandByte(65)
+ sig[32] &= 0x70
+ sig[64] %= 4
+ return sig
+}
+
+func Test_Secp256_06a_alt0(t *testing.T) {
+ pubkey1, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+
+ if sig == nil {
+ t.Fail()
+ }
+ if len(sig) != 65 {
+ t.Fail()
+ }
+ for i := 0; i < TESTS; i++ {
+ sig = randSig()
+ pubkey2 := RecoverPubkey(msg, sig)
+
+ if bytes.Equal(pubkey1, pubkey2) == true {
+ t.Fail()
+ }
+
+ if pubkey2 != nil && VerifySignature(msg, sig, pubkey2) != 1 {
+ t.Fail()
+ }
+
+ if VerifySignature(msg, sig, pubkey1) == 1 {
+ t.Fail()
+ }
+ }
+}
+
+//test random messages against valid signature: should fail
+
+func Test_Secp256_06b(t *testing.T) {
+ pubkey1, seckey := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, seckey)
+
+ failCount := 0
+ for i := 0; i < TESTS; i++ {
+ msg = RandByte(32)
+ pubkey2 := RecoverPubkey(msg, sig)
+ if bytes.Equal(pubkey1, pubkey2) == true {
+ t.Fail()
+ }
+
+ if pubkey2 != nil && VerifySignature(msg, sig, pubkey2) != 1 {
+ t.Fail()
+ }
+
+ if VerifySignature(msg, sig, pubkey1) == 1 {
+ t.Fail()
+ }
+ }
+ if failCount != 0 {
+ fmt.Printf("ERROR: Accepted signature for %v of %v random messages\n", failCount, TESTS)
+ }
+}
+
+/*
+ Deterministic Keypair Tests
+*/
+
+func Test_Deterministic_Keypairs_00(t *testing.T) {
+ for i := 0; i < 64; i++ {
+ seed := RandByte(64)
+ _, pub1, sec1 := DeterministicKeyPairIterator(seed)
+ pub2, sec2 := GenerateDeterministicKeyPair(seed)
+
+ if bytes.Equal(pub1, pub2) == false {
+ t.Fail()
+ }
+ if bytes.Equal(sec1, sec2) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Test_Deterministic_Keypairs_01(t *testing.T) {
+ for i := 0; i < 64; i++ {
+ seed := RandByte(32)
+ _, pub1, sec1 := DeterministicKeyPairIterator(seed)
+ pub2, sec2 := GenerateDeterministicKeyPair(seed)
+
+ if bytes.Equal(pub1, pub2) == false {
+ t.Fail()
+ }
+ if bytes.Equal(sec1, sec2) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Test_Deterministic_Keypairs_02(t *testing.T) {
+ for i := 0; i < 64; i++ {
+ seed := RandByte(32)
+ _, pub1, sec1 := DeterministicKeyPairIterator(seed)
+ pub2, sec2 := GenerateDeterministicKeyPair(seed)
+
+ if bytes.Equal(pub1, pub2) == false {
+ t.Fail()
+ }
+ if bytes.Equal(sec1, sec2) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Decode(str string) []byte {
+ byt, err := hex.DecodeString(str)
+ if err != nil {
+ log.Panic()
+ }
+ return byt
+}
+
+func Test_Deterministic_Keypairs_03(t *testing.T) {
+
+ //test vectors: seed, seckey
+ var testArray = []string{
+ "tQ93w5Aqcunm9SGUfnmF4fJv", "9b8c3e36adce64dedc80d6dfe51ff1742cc1d755bbad457ac01177c5a18a789f",
+ "DC7qdQQtbWSSaekXnFmvQgse", "d2deaf4a9ff7a5111fe1d429d6976cbde78811fdd075371a2a4449bb0f4d8bf9",
+ "X8EkuUZC7Td7PAXeS7Duc7vR", "cad79b6dcf7bd21891cbe20a51c57d59689ae6e3dc482cd6ec22898ac00cd86b",
+ "tVqPYHHNVPRWyEed62v7f23u", "2a386e94e9ffaa409517cbed81b9b2d4e1c5fb4afe3cbd67ce8aba11af0b02fa",
+ "kCy4R57HDfLqF3pVhBWxuMcg", "26a7c6d8809c476a56f7455209f58b5ff3f16435fcf208ff2931ece60067f305",
+ "j8bjv86ZNjKqzafR6mtSUVCE", "ea5c0f8c9f091a70bf38327adb9b2428a9293e7a7a75119920d759ecfa03a995",
+ "qShryAzVY8EtsuD3dsAc7qnG", "331206176509bcae31c881dc51e90a4e82ec33cd7208a5fb4171ed56602017fa",
+ "5FGG7ZBa8wVMBJkmzpXj5ESX", "4ea2ad82e7730d30c0c21d01a328485a0cf5543e095139ba613929be7739b52c",
+ "f46TZG4xJHXUGWx8ekbNqa9F", "dcddd403d3534c4ef5703cc07a771c107ed49b7e0643c6a2985a96149db26108",
+ "XkZdQJ5LT96wshN8JBH8rvEt", "3e276219081f072dff5400ca29a9346421eaaf3c419ff1474ac1c81ad8a9d6e1",
+ "GFDqXU4zYymhJJ9UGqRgS8ty", "95be4163085b571e725edeffa83fff8e7a7db3c1ccab19d0f3c6e105859b5e10",
+ "tmwZksH2XyvuamnddYxyJ5Lp", "2666dd54e469df56c02e82dffb4d3ea067daafe72c54dc2b4f08c4fb3a7b7e42",
+ "EuqZFsbAV5amTzkhgAMgjr7W", "40c325c01f2e4087fcc97fcdbea6c35c88a12259ebf1bce0b14a4d77f075abbf",
+ "TW6j8rMffZfmhyDEt2JUCrLB", "e676e0685c5d1afd43ad823b83db5c6100135c35485146276ee0b0004bd6689e",
+ "8rvkBnygfhWP8kjX9aXq68CY", "21450a646eed0d4aa50a1736e6c9bf99fff006a470aab813a2eff3ee4d460ae4",
+ "phyRfPDuf9JMRFaWdGh7NXPX", "ca7bc04196c504d0e815e125f7f1e086c8ae8c10d5e9df984aeab4b41bf9e398",
+ }
+
+ for i := 0; i < len(testArray)/2; i++ {
+ seed := []byte(testArray[2*i+0])
+ sec1 := Decode(testArray[2*i+1])
+
+ _, sec2 := GenerateDeterministicKeyPair(seed)
+ if bytes.Equal(sec1, sec2) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Test_DeterministicWallets1(t *testing.T) {
+
+ var testArray = []string{
+ "90c56f5b8d78a46fb4cddf6fd9c6d88d6d2d7b0ec35917c7dac12c03b04e444e", "94dd1a9de9ffd57b5516b8a7f090da67f142f7d22356fa5d1b894ee4d4fba95b",
+ "a3b08ccf8cbae4955c02f223be1f97d2bb41d92b7f0c516eb8467a17da1e6057", "82fba4cc2bc29eef122f116f45d01d82ff488d7ee713f8a95c162a64097239e0",
+ "7048eb8fa93cec992b93dc8e93c5543be34aad05239d4c036cf9e587bbcf7654", "44c059496aac871ac168bb6889b9dd3decdb9e1fa082442a95fcbca982643425",
+ "6d25375591bbfce7f601fc5eb40e4f3dde2e453dc4bf31595d8ec29e4370cd80", "d709ceb1a6fb906de506ea091c844ca37c65e52778b8d257d1dd3a942ab367fb",
+ "7214b4c09f584c5ddff971d469df130b9a3c03e0277e92be159279de39462120", "5fe4986fa964773041e119d2b6549acb392b2277a72232af75cbfb62c357c1a7",
+ "b13e78392d5446ae304b5fc9d45b85f26996982b2c0c86138afdac8d2ea9016e", "f784abc2e7f11ee84b4adb72ea4730a6aabe27b09604c8e2b792d8a1a31881ac",
+ "9403bff4240a5999e17e0ab4a645d6942c3a7147c7834e092e461a4580249e6e", "d495174b8d3f875226b9b939121ec53f9383bd560d34aa5ca3ac6b257512adf4",
+ "2665312a3e3628f4df0b9bc6334f530608a9bcdd4d1eef174ecda99f51a6db94", "1fdc9fbfc6991b9416b3a8385c9942e2db59009aeb2d8de349b73d9f1d389374",
+ "6cb37532c80765b7c07698502a49d69351036f57a45a5143e33c57c236d841ca", "c87c85a6f482964db7f8c31720981925b1e357a9fdfcc585bc2164fdef1f54d0",
+ "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c", "e2767d788d1c5620f3ef21d57f2d64559ab203c044f0a5f0730b21984e77019c",
+ "66d1945ceb6ef8014b1b6703cb624f058913e722f15d03225be27cb9d8aabe4a", "3fcb80eb1d5b91c491408447ac4e221fcb2254c861adbb5a178337c2750b0846",
+ "22c7623bf0e850538329e3e6d9a6f9b1235350824a3feaad2580b7a853550deb", "5577d4be25f1b44487140a626c8aeca2a77507a1fc4fd466dd3a82234abb6785",
+ "a5eebe3469d68c8922a1a8b5a0a2b55293b7ff424240c16feb9f51727f734516", "c07275582d0681eb07c7b51f0bca0c48c056d571b7b83d84980ab40ac7d7d720",
+ "479ec3b589b14aa7290b48c2e64072e4e5b15ce395d2072a5a18b0a2cf35f3fd", "f10e2b7675dfa557d9e3188469f12d3e953c2d46dce006cd177b6ae7f465cfc0",
+ "63952334b731ec91d88c54614925576f82e3610d009657368fc866e7b1efbe73", "0bcbebb39d8fe1cb3eab952c6f701656c234e462b945e2f7d4be2c80b8f2d974",
+ "256472ee754ef6af096340ab1e161f58e85fb0cc7ae6e6866b9359a1657fa6c1", "88ba6f6c66fc0ef01c938569c2dd1f05475cb56444f4582d06828e77d54ffbe6",
+ }
+
+ for i := 0; i < len(testArray)/2; i++ {
+ seed := Decode(testArray[2*i+0]) //input
+ seckey1 := Decode(testArray[2*i+1]) //target
+ _, _, seckey2 := DeterministicKeyPairIterator(seed) //output
+ if bytes.Equal(seckey1, seckey2) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Test_Secp256k1_Hash(t *testing.T) {
+
+ var testArray = []string{
+ "90c56f5b8d78a46fb4cddf6fd9c6d88d6d2d7b0ec35917c7dac12c03b04e444e", "a70c36286be722d8111e69e910ce4490005bbf9135b0ce8e7a59f84eee24b88b",
+ "a3b08ccf8cbae4955c02f223be1f97d2bb41d92b7f0c516eb8467a17da1e6057", "e9db072fe5817325504174253a056be7b53b512f1e588f576f1f5a82cdcad302",
+ "7048eb8fa93cec992b93dc8e93c5543be34aad05239d4c036cf9e587bbcf7654", "5e9133e83c4add2b0420d485e1dcda5c00e283c6509388ab8ceb583b0485c13b",
+ "6d25375591bbfce7f601fc5eb40e4f3dde2e453dc4bf31595d8ec29e4370cd80", "8d5579cd702c06c40fb98e1d55121ea0d29f3a6c42f5582b902ac243f29b571a",
+ "7214b4c09f584c5ddff971d469df130b9a3c03e0277e92be159279de39462120", "3a4e8c72921099a0e6a4e7f979df4c8bced63063097835cdfd5ee94548c9c41a",
+ "b13e78392d5446ae304b5fc9d45b85f26996982b2c0c86138afdac8d2ea9016e", "462efa1bf4f639ffaedb170d6fb8ba363efcb1bdf0c5aef0c75afb59806b8053",
+ "9403bff4240a5999e17e0ab4a645d6942c3a7147c7834e092e461a4580249e6e", "68dd702ea7c7352632876e9dc2333142fce857a542726e402bb480cad364f260",
+ "2665312a3e3628f4df0b9bc6334f530608a9bcdd4d1eef174ecda99f51a6db94", "5db72c31d575c332e60f890c7e68d59bd3d0ac53a832e06e821d819476e1f010",
+ "6cb37532c80765b7c07698502a49d69351036f57a45a5143e33c57c236d841ca", "0deb20ec503b4c678213979fd98018c56f24e9c1ec99af3cd84b43c161a9bb5c",
+ "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c", "36f3ede761aa683813013ffa84e3738b870ce7605e0a958ed4ffb540cd3ea504",
+ "66d1945ceb6ef8014b1b6703cb624f058913e722f15d03225be27cb9d8aabe4a", "6bcb4819a96508efa7e32ee52b0227ccf5fbe5539687aae931677b24f6d0bbbd",
+ "22c7623bf0e850538329e3e6d9a6f9b1235350824a3feaad2580b7a853550deb", "8bb257a1a17fd2233935b33441d216551d5ff1553d02e4013e03f14962615c16",
+ "a5eebe3469d68c8922a1a8b5a0a2b55293b7ff424240c16feb9f51727f734516", "d6b780983a63a3e4bcf643ee68b686421079c835a99eeba6962fe41bb355f8da",
+ "479ec3b589b14aa7290b48c2e64072e4e5b15ce395d2072a5a18b0a2cf35f3fd", "39c5f108e7017e085fe90acfd719420740e57768ac14c94cb020d87e36d06752",
+ "63952334b731ec91d88c54614925576f82e3610d009657368fc866e7b1efbe73", "79f654976732106c0e4a97ab3b6d16f343a05ebfcc2e1d679d69d396e6162a77",
+ "256472ee754ef6af096340ab1e161f58e85fb0cc7ae6e6866b9359a1657fa6c1", "387883b86e2acc153aa334518cea48c0c481b573ccaacf17c575623c392f78b2",
+ }
+
+ for i := 0; i < len(testArray)/2; i++ {
+ hash1 := Decode(testArray[2*i+0]) //input
+ hash2 := Decode(testArray[2*i+1]) //target
+ hash3 := Secp256k1Hash(hash1) //output
+ if bytes.Equal(hash2, hash3) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Test_Secp256k1_Equal(t *testing.T) {
+
+ for i := 0; i < 64; i++ {
+ seed := RandByte(128)
+
+ hash1 := Secp256k1Hash(seed)
+ hash2, _, _ := DeterministicKeyPairIterator(seed)
+
+ if bytes.Equal(hash1, hash2) == false {
+ t.Fail()
+ }
+ }
+}
+
+func Test_DeterministicWalletGeneration(t *testing.T) {
+ in := "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c"
+ secOut := "10ba0325f1b8633ca463542950b5cd5f97753a9829ba23477c584e7aee9cfbd5"
+ pubOut := "0249964ac7e3fe1b2c182a2f10abe031784e374cc0c665a63bc76cc009a05bc7c6"
+
+ var seed = []byte(in)
+ var pubkey []byte
+ var seckey []byte
+
+ for i := 0; i < 1024; i++ {
+ seed, pubkey, seckey = DeterministicKeyPairIterator(seed)
+ }
+
+ if bytes.Equal(seckey, Decode(secOut)) == false {
+ t.Fail()
+ }
+
+ if bytes.Equal(pubkey, Decode(pubOut)) == false {
+ t.Fail()
+ }
+}
+
+func Test_ECDH(t *testing.T) {
+
+ pubkey1, seckey1 := GenerateKeyPair()
+ pubkey2, seckey2 := GenerateKeyPair()
+
+ puba := ECDH(pubkey1, seckey2)
+ pubb := ECDH(pubkey2, seckey1)
+
+ if puba == nil {
+ t.Fail()
+ }
+
+ if pubb == nil {
+ t.Fail()
+ }
+
+ if bytes.Equal(puba, pubb) == false {
+ t.Fail()
+ }
+
+}
+
+func Test_ECDH2(t *testing.T) {
+
+ for i := 0; i < 16*1024; i++ {
+
+ pubkey1, seckey1 := GenerateKeyPair()
+ pubkey2, seckey2 := GenerateKeyPair()
+
+ puba := ECDH(pubkey1, seckey2)
+ pubb := ECDH(pubkey2, seckey1)
+
+ if puba == nil {
+ t.Fail()
+ }
+
+ if pubb == nil {
+ t.Fail()
+ }
+
+ if bytes.Equal(puba, pubb) == false {
+ t.Fail()
+ }
+ }
+}
+
+/*
+seed = ee78b2fb5bef47aaab1abf54106b3b022ed3d68fdd24b5cfdd6e639e1c7baa6f
+seckey = 929c5f23a17115199e61b2c4c38fea06f763270a0d1189fbc6a46ddac05081fa
+pubkey1 = 028a4d9f32e7bd25befd0afa9e73755f35ae2f7012dfc7c000252f2afba2589af2
+pubkey2 = 028a4d9f32e7bd25befd0afa9e73755f35ae2f7012dfc80000252f2afba2589af2
+key_wif = L28hjib16NuBT4L1gK4DgzKjjxaCDggeZpXFy93MdZVz9fTZKwiE
+btc_addr1 = 14mvZw1wC8nKtycrTHu6NRTfWHuNVCpRgL
+btc_addr2 = 1HuwS7qARGMgNB7zao1FPmqiiZ92tsJGpX
+deterministic pubkeys do not match
+seed = 0e86692d755fd39a51acf6c935bdf425a6aad03a7914867e3f6db27371c966b4
+seckey = c9d016b26102fb309a73e644f6be308614a1b8f6f46f902c906ffaf0993ee63c
+pubkey1 = 03e86d62256dd05c2852c05a6b11d423f278288abeab490000b93d387de45a2f73
+pubkey2 = 03e86d62256dd05c2852c05a6b11d423f278288abeab494000b93d387de45a2f73
+key_wif = L3z1TTmgddKUm2Em22zKwLXGZ7jfwXLN5GxebpgH5iohaRJSm98D
+btc_addr1 = 1CcrzXvK34Cf4jzTko5uhCwbsC6e6K4rHw
+btc_addr2 = 1GtBH7dcZnh69Anqe8sHXKSJ9Dk4jXGHyp
+*/
+
+func Test_Abnormal_Keys(t *testing.T) {
+
+ for i := 0; i < 32*1024; i++ {
+
+ seed := RandByte(32)
+
+ pubkey1, seckey1 := generateDeterministicKeyPair(seed)
+
+ if seckey1 == nil {
+ t.Fail()
+ }
+
+ if pubkey1 == nil {
+ t.Fail()
+ }
+
+ if VerifyPubkey(pubkey1) != 1 {
+ seedHex := hex.EncodeToString(seed)
+ seckeyHex := hex.EncodeToString(seckey1)
+ log.Printf("seed= %s", seedHex)
+ log.Printf("seckey= %s", seckeyHex)
+ t.Errorf("GenerateKeyPair, generates key that fails validation, run=%d", i)
+ }
+ }
+}
+
+//problem seckeys
+var _testSeckey = []string{
+ "08efb79385c9a8b0d1c6f5f6511be0c6f6c2902963d874a3a4bacc18802528d3",
+ "78298d9ecdc0640c9ae6883201a53f4518055442642024d23c45858f45d0c3e6",
+ "04e04fe65bfa6ded50a12769a3bd83d7351b2dbff08c9bac14662b23a3294b9e",
+ "2f5141f1b75747996c5de77c911dae062d16ae48799052c04ead20ccd5afa113",
+}
+
+//test known bad keys
+func Test_Abnormal_Keys2(t *testing.T) {
+
+ for i := 0; i < len(_testSeckey); i++ {
+
+ seckey1, _ := hex.DecodeString(_testSeckey[i])
+ pubkey1 := PubkeyFromSeckey(seckey1)
+ if pubkey1 == nil {
+ t.Fail()
+ }
+
+ if seckey1 == nil {
+ t.Fail()
+ }
+
+ if pubkey1 == nil {
+ t.Fail()
+ }
+
+ if VerifyPubkey(pubkey1) != 1 {
+ t.Errorf("generates key that fails validation")
+ }
+ }
+}
+
+func _pairGen(seckey []byte) []byte {
+ return nil
+}
+
+//ECDH test
+func Test_Abnormal_Keys3(t *testing.T) {
+
+ for i := 0; i < len(_testSeckey); i++ {
+
+ seckey1, _ := hex.DecodeString(_testSeckey[i])
+ pubkey1 := PubkeyFromSeckey(seckey1)
+
+ seckey2, _ := hex.DecodeString(_testSeckey[rand.Int()%len(_testSeckey)])
+ pubkey2 := PubkeyFromSeckey(seckey2)
+
+ if pubkey1 == nil {
+ t.Errorf("pubkey1 nil")
+ }
+
+ if pubkey2 == nil {
+ t.Errorf("pubkey2 nil")
+ }
+ //pubkey1, seckey1 := GenerateKeyPair()
+ //pubkey2, seckey2 := GenerateKeyPair()
+
+ puba := ECDH(pubkey1, seckey2)
+ pubb := ECDH(pubkey2, seckey1)
+
+ if puba == nil {
+ t.Fail()
+ }
+
+ if pubb == nil {
+ t.Fail()
+ }
+
+ if bytes.Equal(puba, pubb) == false {
+ t.Errorf("recovered do not match")
+ }
+ }
+
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go
new file mode 100644
index 0000000..109694a
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go
@@ -0,0 +1,225 @@
+package secp256k1go
+
+import (
+ "testing"
+)
+
+//var ta = [][3]string{
+// // [0]-pubScr, [1]-sigScript, [2]-unsignedTx
+// {
+// "040eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66dbeb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16",
+// "3045022100fe00e013c244062847045ae7eb73b03fca583e9aa5dbd030a8fd1c6dfcf11b1002207d0d04fed8fa1e93007468d5a9e134b0a7023b6d31db4e50942d43a250f4d07c01",
+// "3382219555ddbb5b00e0090f469e590ba1eae03c7f28ab937de330aa60294ed6",
+// },
+// {
+// "020eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66d",
+// "3045022100fe00e013c244062847045ae7eb73b03fca583e9aa5dbd030a8fd1c6dfcf11b1002207d0d04fed8fa1e93007468d5a9e134b0a7023b6d31db4e50942d43a250f4d07c01",
+// "3382219555ddbb5b00e0090f469e590ba1eae03c7f28ab937de330aa60294ed6",
+// },
+// {
+// "0411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+// "304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901",
+// "7a05c6145f10101e9d6325494245adf1297d80f8f38d4d576d57cdba220bcb19",
+// },
+// {
+// "0311db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+// "304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901",
+// "7a05c6145f10101e9d6325494245adf1297d80f8f38d4d576d57cdba220bcb19",
+// },
+// {
+// "0428f42723f81c70664e200088437282d0e11ae0d4ae139f88bdeef1550471271692970342db8e3f9c6f0123fab9414f7865d2db90c24824da775f00e228b791fd",
+// "3045022100d557da5d9bf886e0c3f98fd6d5d337487cd01d5b887498679a57e3d32bd5d0af0220153217b63a75c3145b14f58c64901675fe28dba2352c2fa9f2a1579c74a2de1701",
+// "c22de395adbb0720941e009e8a4e488791b2e428af775432ed94d2c7ec8e421a",
+// },
+// {
+// "0328f42723f81c70664e200088437282d0e11ae0d4ae139f88bdeef15504712716",
+// "3045022100d557da5d9bf886e0c3f98fd6d5d337487cd01d5b887498679a57e3d32bd5d0af0220153217b63a75c3145b14f58c64901675fe28dba2352c2fa9f2a1579c74a2de1701",
+// "c22de395adbb0720941e009e8a4e488791b2e428af775432ed94d2c7ec8e421a",
+// },
+// {
+// "041f2a00036b3cbd1abe71dca54d406a1e9dd5d376bf125bb109726ff8f2662edcd848bd2c44a86a7772442095c7003248cc619bfec3ddb65130b0937f8311c787",
+// "3045022100ec6eb6b2aa0580c8e75e8e316a78942c70f46dd175b23b704c0330ab34a86a34022067a73509df89072095a16dbf350cc5f1ca5906404a9275ebed8a4ba219627d6701",
+// "7c8e7c2cb887682ed04dc82c9121e16f6d669ea3d57a2756785c5863d05d2e6a",
+// },
+// {
+// "031f2a00036b3cbd1abe71dca54d406a1e9dd5d376bf125bb109726ff8f2662edc",
+// "3045022100ec6eb6b2aa0580c8e75e8e316a78942c70f46dd175b23b704c0330ab34a86a34022067a73509df89072095a16dbf350cc5f1ca5906404a9275ebed8a4ba219627d6701",
+// "7c8e7c2cb887682ed04dc82c9121e16f6d669ea3d57a2756785c5863d05d2e6a",
+// },
+// {
+// "04ee90bfdd4e07eb1cfe9c6342479ca26c0827f84bfe1ab39e32fc3e94a0fe00e6f7d8cd895704e974978766dd0f9fad3c97b1a0f23684e93b400cc9022b7ae532",
+// "3045022100fe1f6e2c2c2cbc916f9f9d16497df2f66a4834e5582d6da0ee0474731c4a27580220682bad9359cd946dc97bb07ea8fad48a36f9b61186d47c6798ccce7ba20cc22701",
+// "baff983e6dfb1052918f982090aa932f56d9301d1de9a726d2e85d5f6bb75464",
+// },
+//}
+
+// func TestVerify1(t *testing.T) {
+// for i := range ta {
+// pkey, _ := hex.DecodeString(ta[i][0])
+// sign, _ := hex.DecodeString(ta[i][1])
+// hasz, _ := hex.DecodeString(ta[i][2])
+
+// res := ecdsaVerify(pkey, sign, hasz)
+// if res != 1 {
+// log.Println("error code", res)
+// t.Fatal("Verify failed at", i)
+// }
+
+// hasz[0]++
+// res = ecdsaVerify(pkey, sign, hasz)
+// if res != 0 {
+// t.Error("Verify not failed while it should", i)
+// }
+// res = ecdsaVerify(pkey[:1], sign, hasz)
+// if res >= 0 {
+// t.Error("Negative result expected", res, i)
+// }
+// res = ecdsaVerify(pkey, sign[:1], hasz)
+// if res >= 0 {
+// t.Error("Yet negative result expected", res, i)
+// }
+// res = ecdsaVerify(pkey, sign, hasz[:1])
+// if res != 0 {
+// t.Error("Zero expected", res, i)
+// }
+// }
+// }
+
+// func BenchmarkVerifyUncompressed(b *testing.B) {
+// key, _ := hex.DecodeString("040eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66dbeb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16")
+// sig, _ := hex.DecodeString("3045022100fe00e013c244062847045ae7eb73b03fca583e9aa5dbd030a8fd1c6dfcf11b1002207d0d04fed8fa1e93007468d5a9e134b0a7023b6d31db4e50942d43a250f4d07c01")
+// msg, _ := hex.DecodeString("3382219555ddbb5b00e0090f469e590ba1eae03c7f28ab937de330aa60294ed6")
+// b.ResetTimer()
+// for i := 0; i < b.N; i++ {
+// ecdsaVerify(key, sig, msg)
+// }
+// }
+
+// func BenchmarkVerifyCompressed(b *testing.B) {
+// keyCompr, _ := hex.DecodeString("020eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66d")
+// sig, _ := hex.DecodeString("3045022100fe00e013c244062847045ae7eb73b03fca583e9aa5dbd030a8fd1c6dfcf11b1002207d0d04fed8fa1e93007468d5a9e134b0a7023b6d31db4e50942d43a250f4d07c01")
+// msg, _ := hex.DecodeString("3382219555ddbb5b00e0090f469e590ba1eae03c7f28ab937de330aa60294ed6")
+// b.ResetTimer()
+// for i := 0; i < b.N; i++ {
+// ecdsaVerify(keyCompr, sig, msg)
+// }
+// }
+
+func TestECmult(t *testing.T) {
+ var u1, u2 Number
+ var pubkeyj, expres, pr XYZ
+
+ pubkeyj.X.SetHex("0EAEBCD1DF2DF853D66CE0E1B0FDA07F67D1CABEFDE98514AAD795B86A6EA66D")
+ pubkeyj.Y.SetHex("BEB26B67D7A00E2447BAECCC8A4CEF7CD3CAD67376AC1C5785AEEBB4F6441C16")
+ pubkeyj.Z.SetHex("0000000000000000000000000000000000000000000000000000000000000001")
+
+ u1.SetHex("B618EBA71EC03638693405C75FC1C9ABB1A74471BAAF1A3A8B9005821491C4B4")
+ u2.SetHex("8554470195DE4678B06EDE9F9286545B51FF2D9AA756CE35A39011783563EA60")
+
+ expres.X.SetHex("EB6752420B6BDB40A760AC26ADD7E7BBD080BF1DF6C0B009A0D310E4511BDF49")
+ expres.Y.SetHex("8E8CEB84E1502FC536FFE67967BC44314270A0B38C79865FFED5A85D138DCA6B")
+ expres.Z.SetHex("813925AF112AAB8243F8CCBADE4CC7F63DF387263028DE6E679232A73A7F3C31")
+
+ pubkeyj.ECmult(&pr, &u2, &u1)
+ if !pr.Equals(&expres) {
+ t.Error("ECmult failed")
+ pr.Print("got")
+ expres.Print("exp")
+ }
+}
+
+type wnafvec struct {
+ inp string
+ w uint
+ exp []int
+}
+
+func TestWNAF(t *testing.T) {
+ var wnaf [129]int
+ var testvcs = []wnafvec{
+ {
+ "3271156f58b59bd7aa542ca6972c1910", winA,
+ []int{0, 0, 0, 0, -15, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 0, -11, 0, 0, 0, 0, 13, 0, 0, 0, 0, 1, 0, 0, 0, 0, -11, 0, 0, 0, 0, -11, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 11, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 15, 0, 0, 0, 0, 11, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 3},
+ },
+ {
+ "0a8a5afcb465a43b8277801311860430", winA,
+ []int{0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, -15, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, -11, 0, 0, 0, 0, 0, -13, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, -5, 0, 0, 0, 0, -13, 0, 0, 0, 0, 3, 0, 0, 0, 0, -11, 0, 0, 0, 0, 1},
+ },
+ {
+ "b1a74471baaf1a3a8b9005821491c4b4", winG,
+ []int{0, 0, -3795, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2633, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 705, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -5959, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1679, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1361, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4551, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11},
+ },
+ {
+ "b618eba71ec03638693405c75fc1c9ab", winG,
+ []int{2475, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -249, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -4549, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -6527, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -8165, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -6369, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -7249, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1457},
+ },
+ }
+ for idx := range testvcs {
+ var xxx Number
+ xxx.SetHex(testvcs[idx].inp)
+ bits := ecmultWnaf(wnaf[:], &xxx, testvcs[idx].w)
+ if bits != len(testvcs[idx].exp) {
+ t.Error("Bad bits at idx", idx)
+ }
+ for i := range testvcs[idx].exp {
+ if wnaf[i] != testvcs[idx].exp[i] {
+ t.Error("Bad val at idx", idx, i)
+ }
+ }
+ }
+}
+
+func TestPrecompileGej(t *testing.T) {
+ var exp, a XYZ
+
+ a.X.SetHex("0eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66d")
+ a.Y.SetHex("beb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16")
+ a.Z.SetHex("01")
+ exp.X.SetHex("ce5dcac5e26ab63868ead1440f359aff29d7ffade62abe801bca97b471bcd416")
+ exp.Y.SetHex("0cc6f63793a207751d507aa4be629f0776441e4873548095bd6d39d34ce8a9d7")
+ exp.Z.SetHex("122927e4908740d51df1f03dc921c00fef68c542e7f28aa270862619cf971815")
+ pre := a.precomp(winA)
+ if len(pre) != 8 {
+ t.Error("Bad result length")
+ }
+ if !pre[7].Equals(&exp) {
+		t.Error("Unexpected value")
+ }
+
+ a.X.SetHex("a45720c272cfa1f77f64be8a404a7d3149bd5410f9a173353f6eb75a5085ba98")
+ a.Y.SetHex("beb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16")
+ a.Z.SetHex("01")
+ exp.X.SetHex("ce5dcac5e26ab63868ead1440f359aff29d7ffade62abe801bca97b471bcd416")
+ exp.Y.SetHex("0cc6f63793a207751d507aa4be629f0776441e4873548095bd6d39d34ce8a9d7")
+ exp.Z.SetHex("49f0fb9f1840e7a58d485c6cc394e597e521bf7d4598be2b367c27326949e507")
+ pre = a.precomp(winA)
+ if len(pre) != 8 {
+ t.Error("Bad result length")
+ }
+ if !pre[7].Equals(&exp) {
+		t.Error("Unexpected value")
+ }
+}
+
+func TestMultGen(t *testing.T) {
+ var nonce Number
+ var ex, ey, ez Field
+ var r XYZ
+ nonce.SetHex("9E3CD9AB0F32911BFDE39AD155F527192CE5ED1F51447D63C4F154C118DA598E")
+ ECmultGen(&r, &nonce)
+ ex.SetHex("02D1BF36D37ACD68E4DD00DB3A707FD176A37E42F81AEF9386924032D3428FF0")
+ ey.SetHex("FD52E285D33EC835230EA69F89D9C38673BD5B995716A4063C893AF02F938454")
+ ez.SetHex("4C6ACE7C8C062A1E046F66FD8E3981DC4E8E844ED856B5415C62047129268C1B")
+ r.X.Normalize()
+ r.Y.Normalize()
+ r.Z.Normalize()
+ if !ex.Equals(&r.X) {
+ t.Error("Bad X")
+ }
+ if !ey.Equals(&r.Y) {
+ t.Error("Bad Y")
+ }
+ if !ez.Equals(&r.Z) {
+		t.Error("Bad Z")
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go
new file mode 100644
index 0000000..24f3b5a
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go
@@ -0,0 +1,36 @@
+package secp256k1go
+
+import (
+ "crypto/rand"
+ "testing"
+)
+
+func TestFeInv(t *testing.T) {
+ var in, out, exp Field
+ in.SetHex("813925AF112AAB8243F8CCBADE4CC7F63DF387263028DE6E679232A73A7F3C31")
+ exp.SetHex("7F586430EA30F914965770F6098E492699C62EE1DF6CAFFA77681C179FDF3117")
+ in.Inv(&out)
+ if !out.Equals(&exp) {
+ t.Error("fe.Inv() failed")
+ }
+}
+
+func BenchmarkFieldSqrt(b *testing.B) {
+ var dat [32]byte
+ var f, tmp Field
+ rand.Read(dat[:])
+ f.SetB32(dat[:])
+ for i := 0; i < b.N; i++ {
+ f.Sqrt(&tmp)
+ }
+}
+
+func BenchmarkFieldInv(b *testing.B) {
+ var dat [32]byte
+ var f, tmp Field
+ rand.Read(dat[:])
+ f.SetB32(dat[:])
+ for i := 0; i < b.N; i++ {
+ f.Inv(&tmp)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go
new file mode 100644
index 0000000..d87c487
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go
@@ -0,0 +1,150 @@
+package secp256k1go
+
+import (
+ "encoding/hex"
+ "strconv"
+ "testing"
+)
+
+func TestSigRecover(t *testing.T) {
+ var vs = [][6]string{
+ {
+ "6028b9e3a31c9e725fcbd7d5d16736aaaafcc9bf157dfb4be62bcbcf0969d488",
+ "036d4a36fa235b8f9f815aa6f5457a607f956a71a035bf0970d8578bf218bb5a",
+ "9cff3da1a4f86caf3683f865232c64992b5ed002af42b321b8d8a48420680487",
+ "0",
+ "56dc5df245955302893d8dda0677cc9865d8011bc678c7803a18b5f6faafec08",
+ "54b5fbdcd8fac6468dac2de88fadce6414f5f3afbb103753e25161bef77705a6",
+ },
+ {
+ "b470e02f834a3aaafa27bd2b49e07269e962a51410f364e9e195c31351a05e50",
+ "560978aed76de9d5d781f87ed2068832ed545f2b21bf040654a2daff694c8b09",
+ "9ce428d58e8e4caf619dc6fc7b2c2c28f0561654d1f80f322c038ad5e67ff8a6",
+ "1",
+ "15b7e7d00f024bffcd2e47524bb7b7d3a6b251e23a3a43191ed7f0a418d9a578",
+ "bf29a25e2d1f32c5afb18b41ae60112723278a8af31275965a6ec1d95334e840",
+ },
+ }
+
+ var sig Signature
+ var pubkey, exp XY
+ var msg Number
+
+ for i := range vs {
+ sig.R.SetHex(vs[i][0])
+ sig.S.SetHex(vs[i][1])
+ msg.SetHex(vs[i][2])
+ rid, _ := strconv.ParseInt(vs[i][3], 10, 32)
+ exp.X.SetHex(vs[i][4])
+ exp.Y.SetHex(vs[i][5])
+
+ if sig.Recover(&pubkey, &msg, int(rid)) {
+ if !exp.X.Equals(&pubkey.X) {
+ t.Error("X mismatch at vector", i)
+ }
+ if !exp.Y.Equals(&pubkey.Y) {
+ t.Error("Y mismatch at vector", i)
+ }
+ } else {
+			t.Error("sig.recover failed")
+ }
+ }
+}
+
+func TestSigVerify(t *testing.T) {
+ var msg Number
+ var sig Signature
+ var key XY
+
+ //// len(65) keys are rejected now, this test case is invalid:
+ // msg.SetHex("3382219555ddbb5b00e0090f469e590ba1eae03c7f28ab937de330aa60294ed6")
+ // sig.R.SetHex("fe00e013c244062847045ae7eb73b03fca583e9aa5dbd030a8fd1c6dfcf11b10")
+ // sig.S.SetHex("7d0d04fed8fa1e93007468d5a9e134b0a7023b6d31db4e50942d43a250f4d07c")
+ // xy, _ := hex.DecodeString("040eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66dbeb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16")
+ // key.ParsePubkey(xy)
+ // if !sig.Verify(&key, &msg) {
+ // t.Error("sig.Verify 0")
+ // }
+
+ msg.SetHex("D474CBF2203C1A55A411EEC4404AF2AFB2FE942C434B23EFE46E9F04DA8433CA")
+ sig.R.SetHex("98F9D784BA6C5C77BB7323D044C0FC9F2B27BAA0A5B0718FE88596CC56681980")
+ sig.S.SetHex("E3599D551029336A745B9FB01566624D870780F363356CEE1425ED67D1294480")
+ key.X.SetHex("7d709f85a331813f9ae6046c56b3a42737abf4eb918b2e7afee285070e968b93")
+ key.Y.SetHex("26150d1a63b342986c373977b00131950cb5fc194643cad6ea36b5157eba4602")
+ if !sig.Verify(&key, &msg) {
+ t.Error("sig.Verify 1")
+ }
+
+ msg.SetHex("2c43a883f4edc2b66c67a7a355b9312a565bb3d33bb854af36a06669e2028377")
+ sig.R.SetHex("6b2fa9344462c958d4a674c2a42fbedf7d6159a5276eb658887e2e1b3915329b")
+ sig.S.SetHex("eddc6ea7f190c14a0aa74e41519d88d2681314f011d253665f301425caf86b86")
+ xy, _ := hex.DecodeString("02a60d70cfba37177d8239d018185d864b2bdd0caf5e175fd4454cc006fd2d75ac")
+ key.ParsePubkey(xy)
+ if !sig.Verify(&key, &msg) {
+ t.Error("sig.Verify 2")
+ }
+}
+
+func TestSigSign(t *testing.T) {
+ var sec, msg, non Number
+ var sig Signature
+ var recid int
+ sec.SetHex("73641C99F7719F57D8F4BEB11A303AFCD190243A51CED8782CA6D3DBE014D146")
+ msg.SetHex("D474CBF2203C1A55A411EEC4404AF2AFB2FE942C434B23EFE46E9F04DA8433CA")
+ non.SetHex("9E3CD9AB0F32911BFDE39AD155F527192CE5ED1F51447D63C4F154C118DA598E")
+ res := sig.Sign(&sec, &msg, &non, &recid)
+ if res != 1 {
+ t.Error("res failed", res)
+ }
+ if forceLowS {
+ if recid != 0 {
+ t.Error("recid failed", recid)
+ }
+ } else {
+ if recid != 1 {
+ t.Error("recid failed", recid)
+ }
+ }
+ non.SetHex("98f9d784ba6c5c77bb7323d044c0fc9f2b27baa0a5b0718fe88596cc56681980")
+ if sig.R.Cmp(&non.Int) != 0 {
+ t.Error("R failed", sig.R.String())
+ }
+ if forceLowS {
+ non.SetHex("1ca662aaefd6cc958ba4604fea999db133a75bf34c13334dabac7124ff0cfcc1")
+ } else {
+ non.SetHex("E3599D551029336A745B9FB01566624D870780F363356CEE1425ED67D1294480")
+ }
+ if sig.S.Cmp(&non.Int) != 0 {
+ t.Error("S failed", sig.S.String())
+ }
+}
+
+func BenchmarkVerify(b *testing.B) {
+ var msg Number
+ var sig Signature
+ var key XY
+ msg.SetHex("D474CBF2203C1A55A411EEC4404AF2AFB2FE942C434B23EFE46E9F04DA8433CA")
+ sig.R.SetHex("98F9D784BA6C5C77BB7323D044C0FC9F2B27BAA0A5B0718FE88596CC56681980")
+ sig.S.SetHex("E3599D551029336A745B9FB01566624D870780F363356CEE1425ED67D1294480")
+ key.X.SetHex("7d709f85a331813f9ae6046c56b3a42737abf4eb918b2e7afee285070e968b93")
+ key.Y.SetHex("26150d1a63b342986c373977b00131950cb5fc194643cad6ea36b5157eba4602")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if !sig.Verify(&key, &msg) {
+ b.Fatal("sig_verify failed")
+ }
+ }
+}
+
+func BenchmarkSign(b *testing.B) {
+ var sec, msg, non Number
+ var sig Signature
+ var recid int
+ sec.SetHex("73641C99F7719F57D8F4BEB11A303AFCD190243A51CED8782CA6D3DBE014D146")
+ msg.SetHex("D474CBF2203C1A55A411EEC4404AF2AFB2FE942C434B23EFE46E9F04DA8433CA")
+ non.SetHex("9E3CD9AB0F32911BFDE39AD155F527192CE5ED1F51447D63C4F154C118DA598E")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sig.Sign(&sec, &msg, &non, &recid)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go
new file mode 100644
index 0000000..b392785
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go
@@ -0,0 +1,49 @@
+package secp256k1go
+
+import (
+ "testing"
+)
+
+func _TestGejDouble(t *testing.T) {
+ var a, aExp, r XYZ
+ a.X.SetHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")
+ a.Y.SetHex("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8")
+ a.Z.SetHex("01")
+ aExp.X.SetHex("7D152C041EA8E1DC2191843D1FA9DB55B68F88FEF695E2C791D40444B365AFC2")
+ aExp.Y.SetHex("56915849F52CC8F76F5FD7E4BF60DB4A43BF633E1B1383F85FE89164BFADCBDB")
+ aExp.Z.SetHex("9075B4EE4D4788CABB49F7F81C221151FA2F68914D0AA833388FA11FF621A970")
+
+ a.Double(&r)
+ if !r.Equals(&aExp) {
+ t.Error("gej.Double failed")
+ }
+}
+
+func TestGejMulLambda(t *testing.T) {
+ var a, aExp XYZ
+ a.X.SetHex("0eaebcd1df2df853d66ce0e1b0fda07f67d1cabefde98514aad795b86a6ea66d")
+ a.Y.SetHex("beb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16")
+ a.Z.SetHex("01")
+ aExp.X.SetHex("a45720c272cfa1f77f64be8a404a7d3149bd5410f9a173353f6eb75a5085ba98")
+ aExp.Y.SetHex("beb26b67d7a00e2447baeccc8a4cef7cd3cad67376ac1c5785aeebb4f6441c16")
+ aExp.Z.SetHex("01")
+ aLam := a
+ aLam.mulLambda(&aLam)
+ if !aLam.Equals(&aExp) {
+ t.Error("mul_lambda failed")
+ }
+}
+
+func TestGejGetX(t *testing.T) {
+ var a XYZ
+ var X, exp Field
+ a.X.SetHex("EB6752420B6BDB40A760AC26ADD7E7BBD080BF1DF6C0B009A0D310E4511BDF49")
+ a.Y.SetHex("8E8CEB84E1502FC536FFE67967BC44314270A0B38C79865FFED5A85D138DCA6B")
+ a.Z.SetHex("813925AF112AAB8243F8CCBADE4CC7F63DF387263028DE6E679232A73A7F3C31")
+
+ exp.SetHex("fe00e013c244062847045ae7eb73b03fca583e9aa5dbd030a8fd1c6dfcf11b10")
+ a.getX(&X)
+ if !X.Equals(&exp) {
+ t.Error("get.get_x() fail")
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/block_test.go b/vendor/github.com/skycoin/skycoin/src/coin/block_test.go
new file mode 100644
index 0000000..c17aa97
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/block_test.go
@@ -0,0 +1,191 @@
+// +build ignore
+
+package coin
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher"
+ "github.com/skycoin/skycoin/src/testutil"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func badFeeCalc(t *Transaction) (uint64, error) {
+ return 0, errors.New("Bad")
+}
+
+func makeNewBlock(uxHash cipher.SHA256) (*Block, error) {
+ body := BlockBody{
+ Transactions: Transactions{Transaction{}},
+ }
+
+ prev := Block{
+ Body: body,
+ Head: BlockHeader{
+ Version: 0x02,
+ Time: 100,
+ BkSeq: 0,
+ Fee: 10,
+ PrevHash: cipher.SHA256{},
+ BodyHash: body.Hash(),
+ }}
+ return NewBlock(prev, 100+20, uxHash, Transactions{Transaction{}}, _feeCalc)
+}
+
+func addTransactionToBlock(t *testing.T, b *Block) Transaction {
+ tx := makeTransaction(t)
+ b.Body.Transactions = append(b.Body.Transactions, tx)
+ return tx
+}
+
+func TestNewBlock(t *testing.T) {
+ // TODO -- update this test for newBlock changes
+ prev := Block{Head: BlockHeader{Version: 0x02, Time: 100, BkSeq: 98}}
+ uxHash := testutil.RandSHA256(t)
+ txns := Transactions{Transaction{}}
+ // invalid txn fees panics
+ _, err := NewBlock(prev, 133, uxHash, txns, badFeeCalc)
+ require.EqualError(t, err, fmt.Sprintf("Invalid transaction fees: Bad"))
+
+ // no txns panics
+ _, err = NewBlock(prev, 133, uxHash, nil, _feeCalc)
+ require.EqualError(t, err, "Refusing to create block with no transactions")
+
+ _, err = NewBlock(prev, 133, uxHash, Transactions{}, _feeCalc)
+ require.EqualError(t, err, "Refusing to create block with no transactions")
+
+ // valid block is fine
+ fee := uint64(121)
+ currentTime := uint64(133)
+ b, err := NewBlock(prev, currentTime, uxHash, txns, func(t *Transaction) (uint64, error) {
+ return fee, nil
+ })
+ require.NoError(t, err)
+ assert.Equal(t, b.Body.Transactions, txns)
+ assert.Equal(t, b.Head.Fee, fee*uint64(len(txns)))
+ assert.Equal(t, b.Body, BlockBody{Transactions: txns})
+ assert.Equal(t, b.Head.PrevHash, prev.HashHeader())
+ assert.Equal(t, b.Head.Time, currentTime)
+ assert.Equal(t, b.Head.BkSeq, prev.Head.BkSeq+1)
+ assert.Equal(t, b.Head.UxHash, uxHash)
+}
+
+func TestBlockHashHeader(t *testing.T) {
+ uxHash := testutil.RandSHA256(t)
+ b, err := makeNewBlock(uxHash)
+ require.NoError(t, err)
+ assert.Equal(t, b.HashHeader(), b.Head.Hash())
+ assert.NotEqual(t, b.HashHeader(), cipher.SHA256{})
+}
+
+func TestBlockHashBody(t *testing.T) {
+ uxHash := testutil.RandSHA256(t)
+ b, err := makeNewBlock(uxHash)
+ require.NoError(t, err)
+ assert.Equal(t, b.HashBody(), b.Body.Hash())
+ hb := b.HashBody()
+ hashes := b.Body.Transactions.Hashes()
+ tx := addTransactionToBlock(t, b)
+ assert.NotEqual(t, b.HashBody(), hb)
+ hashes = append(hashes, tx.Hash())
+ assert.Equal(t, b.HashBody(), cipher.Merkle(hashes))
+ assert.Equal(t, b.HashBody(), b.Body.Hash())
+}
+
+func TestNewGenesisBlock(t *testing.T) {
+ gb, err := NewGenesisBlock(genAddress, _genCoins, _genTime)
+ require.NoError(t, err)
+
+ require.Equal(t, cipher.SHA256{}, gb.Head.PrevHash)
+ require.Equal(t, _genTime, gb.Head.Time)
+ require.Equal(t, uint64(0), gb.Head.BkSeq)
+ require.Equal(t, uint32(0), gb.Head.Version)
+ require.Equal(t, uint64(0), gb.Head.Fee)
+ require.Equal(t, cipher.SHA256{}, gb.Head.UxHash)
+
+ require.Equal(t, 1, len(gb.Body.Transactions))
+ tx := gb.Body.Transactions[0]
+ require.Len(t, tx.In, 0)
+ require.Len(t, tx.Sigs, 0)
+ require.Len(t, tx.Out, 1)
+
+ require.Equal(t, genAddress, tx.Out[0].Address)
+ require.Equal(t, _genCoins, tx.Out[0].Coins)
+ require.Equal(t, _genCoins, tx.Out[0].Hours)
+}
+
+func TestCreateUnspent(t *testing.T) {
+ tx := Transaction{}
+ tx.PushOutput(genAddress, 11e6, 255)
+ bh := BlockHeader{
+ Time: tNow(),
+ BkSeq: uint64(1),
+ }
+
+ tt := []struct {
+ name string
+ txIndex int
+ err error
+ }{
+ {
+ "ok",
+ 0,
+ nil,
+ },
+ {
+ "index overflow",
+ 10,
+ errors.New("Transaction out index is overflow"),
+ },
+ }
+
+ for _, tc := range tt {
+ t.Run(tc.name, func(t *testing.T) {
+ uxout, err := CreateUnspent(bh, tx, tc.txIndex)
+ require.Equal(t, tc.err, err)
+ if err != nil {
+ return
+ }
+ assertUnspent(t, bh, tx, tc.txIndex, uxout)
+ })
+ }
+}
+
+func TestCreateUnspents(t *testing.T) {
+ tx := Transaction{}
+ tx.PushOutput(genAddress, 11e6, 255)
+ bh := BlockHeader{
+ Time: tNow(),
+ BkSeq: uint64(1),
+ }
+ uxouts := CreateUnspents(bh, tx)
+ assert.Equal(t, len(uxouts), 1)
+ assertValidUnspents(t, bh, tx, uxouts)
+}
+
+func assertUnspent(t *testing.T, bh BlockHeader, tx Transaction, txIndex int, ux UxOut) {
+ assert.Equal(t, bh.Time, ux.Head.Time)
+ assert.Equal(t, bh.BkSeq, ux.Head.BkSeq)
+ assert.Equal(t, tx.Hash(), ux.Body.SrcTransaction)
+ assert.Equal(t, tx.Out[txIndex].Address, ux.Body.Address)
+ assert.Equal(t, tx.Out[txIndex].Coins, ux.Body.Coins)
+ assert.Equal(t, tx.Out[txIndex].Hours, ux.Body.Hours)
+}
+
+func assertValidUnspents(t *testing.T, bh BlockHeader, tx Transaction,
+ uxo UxArray) {
+ assert.Equal(t, len(tx.Out), len(uxo))
+ for i, ux := range uxo {
+ assert.Equal(t, bh.Time, ux.Head.Time)
+ assert.Equal(t, bh.BkSeq, ux.Head.BkSeq)
+ assert.Equal(t, tx.Hash(), ux.Body.SrcTransaction)
+ assert.Equal(t, tx.Out[i].Address, ux.Body.Address)
+ assert.Equal(t, tx.Out[i].Coins, ux.Body.Coins)
+ assert.Equal(t, tx.Out[i].Hours, ux.Body.Hours)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/coin_test.go b/vendor/github.com/skycoin/skycoin/src/coin/coin_test.go
new file mode 100644
index 0000000..a371fe3
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/coin_test.go
@@ -0,0 +1,299 @@
+package coin
+
+import (
+ "encoding/hex"
+ "math/rand"
+ "testing"
+
+ "github.com/skycoin/skycoin/src/cipher"
+ "github.com/skycoin/skycoin/src/util/utc"
+)
+
+var (
+ genPublic, genSecret = cipher.GenerateKeyPair()
+ genAddress = cipher.AddressFromPubKey(genPublic)
+ _genTime uint64 = 1000
+ _genCoins uint64 = 1000e6
+ _genCoinHours uint64 = 1000 * 1000
+)
+
+func tNow() uint64 {
+ return uint64(utc.UnixNow())
+}
+
+func _feeCalc(t *Transaction) (uint64, error) {
+ return 0, nil
+}
+
+func TestAddress1(t *testing.T) {
+ a := "02fa939957e9fc52140e180264e621c2576a1bfe781f88792fb315ca3d1786afb8"
+ b, err := hex.DecodeString(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ addr := cipher.AddressFromPubKey(cipher.NewPubKey(b))
+ _ = addr
+
+ ///func SignHash(hash cipher.SHA256, sec SecKey) (Sig, error) {
+
+}
+
+func TestAddress2(t *testing.T) {
+ a := "5a42c0643bdb465d90bf673b99c14f5fa02db71513249d904573d2b8b63d353d"
+ b, err := hex.DecodeString(a)
+ if err != nil {
+ t.Fail()
+ }
+
+ if len(b) != 32 {
+ t.Fail()
+ }
+
+ seckey := cipher.NewSecKey(b)
+ pubkey := cipher.PubKeyFromSecKey(seckey)
+ addr := cipher.AddressFromPubKey(pubkey)
+ _ = addr
+
+ ///func SignHash(hash cipher.SHA256, sec SecKey) (Sig, error) {
+
+}
+
+//TODO: 100% coverage over cryptographic functions
+
+//Crypto Functions to Test
+//func ChkSig(address Address, hash cipher.SHA256, sig Sig) error {
+//func SignHash(hash cipher.SHA256, sec SecKey) (Sig, error) {
+//func cipher.PubKeyFromSecKey(seckey SecKey) (PubKey) {
+//func PubKeyFromSig(sig Sig, hash cipher.SHA256) (PubKey, error) {
+//func VerifySignature(pubkey PubKey, sig Sig, hash cipher.SHA256) error {
+//func GenerateKeyPair() (PubKey, SecKey) {
+//func GenerateDeterministicKeyPair(seed []byte) (PubKey, SecKey) {
+//func testSecKey(seckey SecKey) error {
+
+func TestCrypto1(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ _, seckey := cipher.GenerateKeyPair()
+ if cipher.TestSecKey(seckey) != nil {
+ t.Fatal("CRYPTOGRAPHIC INTEGRITY CHECK FAILED")
+ }
+ }
+}
+
+//test signatures
+func TestCrypto2(t *testing.T) {
+ a := "5a42c0643bdb465d90bf673b99c14f5fa02db71513249d904573d2b8b63d353d"
+ b, err := hex.DecodeString(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(b) != 32 {
+ t.Fatal()
+ }
+
+ seckey := cipher.NewSecKey(b)
+ pubkey := cipher.PubKeyFromSecKey(seckey)
+
+ addr := cipher.AddressFromPubKey(pubkey)
+ _ = addr
+
+ test := []byte("test message")
+ hash := cipher.SumSHA256(test)
+ err = cipher.TestSecKeyHash(seckey, hash)
+ if err != nil {
+ t.Fatal()
+ }
+
+}
+
+func _gensec() cipher.SecKey {
+ _, s := cipher.GenerateKeyPair()
+ return s
+}
+
+func _gpub(s cipher.SecKey) cipher.PubKey {
+ return cipher.PubKeyFromSecKey(s)
+}
+
+func _gaddr(s cipher.SecKey) cipher.Address {
+ return cipher.AddressFromSecKey(s)
+}
+
+func _gaddrA1(S []cipher.SecKey) []cipher.Address {
+ A := make([]cipher.Address, len(S))
+ for i := 0; i < len(S); i++ {
+ A[i] = cipher.AddressFromSecKey(S[i])
+ }
+ return A
+}
+
+func _gaddrA2(S []cipher.SecKey, O []UxOut) []int {
+ A := _gaddrA1(S)
+ var M map[cipher.Address]int //address to int
+ for i, a := range A {
+ M[a] = i
+ }
+
+ I := make([]int, len(O)) //output to seckey/address index
+ for i, o := range O {
+ I[i] = M[o.Body.Address]
+ }
+
+ return I
+}
+
+func _gaddrA3(S []cipher.SecKey) map[cipher.Address]int {
+ A := _gaddrA1(S)
+ M := make(map[cipher.Address]int) //address to int
+ for i, a := range A {
+ M[a] = i
+ }
+ return M
+}
+
+//assign amt to n bins in randomized manner
+func _randBins(amt uint64, n int) []uint64 {
+ bins := make([]uint64, n)
+ max := amt / (4 * uint64(n))
+ for i := 0; amt > 0; i++ {
+ //amount going into this bin
+ b := 1 + (uint64(rand.Int63()) % max)
+ if b > amt {
+ b = amt
+ }
+ bins[i%n] += b
+ amt -= b
+ }
+ return bins
+}
+
+/*
+TODO: check block header of new block
+TODO: check that coins are not created or destroyed
+TODO:
+*/
+
+//create 4096 addresses
+//send addresses randomly between each other over 1024 blocks
+
+/*
+func TestBlockchain1(t *testing.T) {
+
+ var S []SecKey
+ for i := 0; i < 4096; i++ {
+ S = append(S, _gensec())
+ }
+
+ A := _gaddr_a1(S)
+
+ var bc *Blockchain = NewBlockchain(A[0])
+
+ for i := 0; i < 1024; i++ {
+ b := bc.NewBlock()
+
+ //unspent outputs
+ U := make([]UxOut, len(bc.Unspent))
+ copy(U, bc.Unspent)
+
+ //for _,Ux := range U {
+ // if Ux.Hours() < Ux.Body.
+ //}
+ //I := _gaddr_a2(S,U)
+ M := _gaddr_a3(S, U)
+ var num_in int = 1 + rand.Intn(len(U))%15
+ var num_out int = 1 + rand.Int()%30
+
+ var t Transaction
+
+ SigIdx := make([]int, num_in)
+
+ var v1 uint64 = 0
+ var v2 uint64 = 0
+ for i := 0; i < num_in; i++ {
+ idx := rand.Intn(len(U))
+ var Ux UxOut = U[idx] //unspent output to spend
+ U[idx], U = U[len(U)-1], U[:len(U)-1] //remove output idx
+
+ v1 += Ux.Body.Coins
+ v2 += Ux.Body.Hours
+
+ //index of signature that must sign input
+ SigIdx[i] = M[Ux.Body.Address] //signature index
+
+ var ti TransactionInput
+ ti.SigIdx = uint16(i)
+ ti.UxOut = Ux.Hash()
+ t.TxIn = append(t.TxIn, ti) //append input to transaction
+ }
+
+ //assign coins to output addresses in random manner
+
+ //check that inputs/outputs sum
+ v1_ := v1
+ v2_ := v2
+
+ vo1 := _rand_bins(v1, num_out)
+ vo2 := _rand_bins(v2, num_out)
+
+ var v1_t uint64
+ var v2_t uint64
+ for i, _ := range vo1 {
+ v1_t += vo1[i]
+ v2_t += vo2[i]
+ }
+
+ if v1_t != v1_ {
+ log.Panic()
+ }
+ if v2_t != v2_ {
+ log.Panic()
+ }
+ //log.Printf("%v %v, %v %v \n", v1_,v2_, v1_t, v2_t)
+
+ for i := 0; i < num_out; i++ {
+ var to TransactionOutput
+ to.Address = A[rand.Intn(len(A))]
+ to.Coins = vo1[i]
+ to.Hours = vo2[i]
+ t.TxOut = append(t.TxOut, to)
+ }
+
+ //transaction complete, now set signatures
+ for i := 0; i < num_in; i++ {
+ t.SetSig(uint16(i), S[SigIdx[i]])
+ }
+ t.UpdateHeader() //sets hash
+
+ err := bc.AppendTransaction(&b, t)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("Block %v \n", i)
+ err = bc.ExecuteBlock(b)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ }
+}
+*/
+
+/*
+func TestGetListenPort(t *testing.T) {
+ // No connectionMirror found
+ assert.Equal(t, getListenPort(addr), uint16(0))
+ // No mirrorConnection map exists
+ ConnectionMirrors[addr] = uint32(4)
+ assert.Panics(t, func() { getListenPort(addr) })
+ // Everything is good
+ m := make(map[string]uint16)
+ mirrorConnections[uint32(4)] = m
+ m[addrIP] = uint16(6667)
+ assert.Equal(t, getListenPort(addr), uint16(6667))
+
+ // cleanup
+ delete(mirrorConnections, uint32(4))
+ delete(ConnectionMirrors, addr)
+}
+*/
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/math_test.go b/vendor/github.com/skycoin/skycoin/src/coin/math_test.go
new file mode 100644
index 0000000..c628b4a
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/math_test.go
@@ -0,0 +1,116 @@
+package coin
+
+import (
+ "fmt"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddUint64(t *testing.T) {
+ n, err := AddUint64(10, 11)
+ require.NoError(t, err)
+ require.Equal(t, uint64(21), n)
+
+ _, err = AddUint64(math.MaxUint64, 1)
+ require.Error(t, err)
+}
+
+func TestAddUint32(t *testing.T) {
+ n, err := addUint32(10, 11)
+ require.NoError(t, err)
+ require.Equal(t, uint32(21), n)
+
+ _, err = addUint32(math.MaxUint32, 1)
+ require.Error(t, err)
+}
+
+func TestMultUint64(t *testing.T) {
+ n, err := multUint64(10, 11)
+ require.NoError(t, err)
+ require.Equal(t, uint64(110), n)
+
+ _, err = multUint64(math.MaxUint64/2, 3)
+ require.Error(t, err)
+}
+
+func TestUint64ToInt64(t *testing.T) {
+ cases := []struct {
+ a uint64
+ b int64
+ err error
+ }{
+ {
+ a: 0,
+ b: 0,
+ },
+ {
+ a: 1,
+ b: 1,
+ },
+ {
+ a: math.MaxInt64,
+ b: math.MaxInt64,
+ },
+ {
+ a: math.MaxUint64,
+ err: ErrUint64OverflowsInt64,
+ },
+ {
+ a: math.MaxInt64 + 1,
+ err: ErrUint64OverflowsInt64,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprint(tc.a), func(t *testing.T) {
+ x, err := Uint64ToInt64(tc.a)
+ if tc.err != nil {
+ require.Equal(t, tc.err, err)
+ } else {
+ require.Equal(t, tc.b, x)
+ }
+ })
+ }
+}
+
+func TestInt64ToUint64(t *testing.T) {
+ cases := []struct {
+ a int64
+ b uint64
+ err error
+ }{
+ {
+ a: 0,
+ b: 0,
+ },
+ {
+ a: 1,
+ b: 1,
+ },
+ {
+ a: math.MaxInt64,
+ b: math.MaxInt64,
+ },
+ {
+ a: -math.MaxInt64,
+ err: ErrInt64UnderflowsUint64,
+ },
+ {
+ a: -1,
+ err: ErrInt64UnderflowsUint64,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprint(tc.a), func(t *testing.T) {
+ x, err := Int64ToUint64(tc.a)
+ if tc.err != nil {
+ require.Equal(t, tc.err, err)
+ } else {
+ require.Equal(t, tc.b, x)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/outputs_test.go b/vendor/github.com/skycoin/skycoin/src/coin/outputs_test.go
new file mode 100644
index 0000000..ff71940
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/outputs_test.go
@@ -0,0 +1,577 @@
+package coin
+
+import (
+ "bytes"
+ "errors"
+ "math"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher"
+ "github.com/skycoin/skycoin/src/testutil"
+)
+
+func makeUxBody(t *testing.T) UxBody {
+ body, _ := makeUxBodyWithSecret(t)
+ return body
+}
+
+func makeUxOut(t *testing.T) UxOut {
+ ux, _ := makeUxOutWithSecret(t)
+ return ux
+}
+
+func makeUxBodyWithSecret(t *testing.T) (UxBody, cipher.SecKey) {
+ p, s := cipher.GenerateKeyPair()
+ return UxBody{
+ SrcTransaction: testutil.RandSHA256(t),
+ Address: cipher.AddressFromPubKey(p),
+ Coins: 1e6,
+ Hours: 100,
+ }, s
+}
+
+func makeUxOutWithSecret(t *testing.T) (UxOut, cipher.SecKey) {
+ body, sec := makeUxBodyWithSecret(t)
+ return UxOut{
+ Head: UxHead{
+ Time: 100,
+ BkSeq: 2,
+ },
+ Body: body,
+ }, sec
+}
+
+func TestUxBodyHash(t *testing.T) {
+ uxb := makeUxBody(t)
+ h := uxb.Hash()
+ assert.NotEqual(t, h, cipher.SHA256{})
+}
+
+func TestUxOutHash(t *testing.T) {
+ uxb := makeUxBody(t)
+ uxo := UxOut{Body: uxb}
+ assert.Equal(t, uxb.Hash(), uxo.Hash())
+ // Head should not affect hash
+ uxo.Head = UxHead{0, 1}
+ assert.Equal(t, uxb.Hash(), uxo.Hash())
+}
+
+func TestUxOutSnapshotHash(t *testing.T) {
+ ux := makeUxOut(t)
+ h := ux.SnapshotHash()
+ // snapshot hash should be dependent on every field in body and head
+ ux2 := ux
+ ux2.Head.Time = 20
+ assert.NotEqual(t, ux2.SnapshotHash(), h)
+ ux2 = ux
+ ux2.Head.BkSeq = 4
+ assert.NotEqual(t, ux2.SnapshotHash(), h)
+ ux2 = ux
+ ux2.Body.SrcTransaction = testutil.RandSHA256(t)
+ assert.NotEqual(t, ux2.SnapshotHash(), h)
+ ux2 = ux
+ ux2.Body.Address = makeAddress()
+ assert.NotEqual(t, ux2.SnapshotHash(), h)
+ ux2 = ux
+ ux2.Body.Coins = ux.Body.Coins * 2
+ assert.NotEqual(t, ux2.SnapshotHash(), h)
+ ux2 = ux
+ ux2.Body.Hours = ux.Body.Hours * 2
+ assert.NotEqual(t, ux2.SnapshotHash(), h)
+}
+
+func TestUxOutCoinHours(t *testing.T) {
+ uxo := makeUxOut(t)
+
+ // Less than 1 hour passed
+ now := uint64(100) + uxo.Head.Time
+ hours, err := uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, hours, uxo.Body.Hours)
+
+ // 1 hours passed
+ now = uint64(3600) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, hours, uxo.Body.Hours+(uxo.Body.Coins/1e6))
+
+ // 6 hours passed
+ now = uint64(3600*6) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, hours, uxo.Body.Hours+(uxo.Body.Coins/1e6)*6)
+
+ // Time is backwards (treated as no hours passed)
+ now = uxo.Head.Time / 2
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, hours, uxo.Body.Hours)
+
+ // 1 hour has passed, output has 1.5 coins, should gain 1 coinhour
+ uxo.Body.Coins = 1e6 + 5e5
+ now = uint64(3600) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, uxo.Body.Hours+1, hours)
+
+ // 2 hours have passed, output has 1.5 coins, should gain 3 coin hours
+ uxo.Body.Coins = 1e6 + 5e5
+ now = uint64(3600*2) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, uxo.Body.Hours+3, hours, "%d != %d", uxo.Body.Hours+3, hours)
+
+ // 1 second has passed, output has 3600 coins, should gain 1 coin hour
+ uxo.Body.Coins = 3600e6
+ now = uint64(1) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, uxo.Body.Hours+1, hours)
+
+ // 1000000 hours minus 1 second have passed, output has 1 droplet, should gain 0 coin hour
+ uxo.Body.Coins = 1
+ now = uint64(1000000*3600-1) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, uxo.Body.Hours, hours)
+
+ // 1000000 hours have passed, output has 1 droplet, should gain 1 coin hour
+ uxo.Body.Coins = 1
+ now = uint64(1000000*3600) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, uxo.Body.Hours+1, hours)
+
+ // 1000000 hours plus 1 second have passed, output has 1 droplet, should gain 1 coin hour
+ uxo.Body.Coins = 1
+ now = uint64(1000000*3600+1) + uxo.Head.Time
+ hours, err = uxo.CoinHours(now)
+ require.NoError(t, err)
+ require.Equal(t, uxo.Body.Hours+1, hours)
+
+ // No hours passed, using initial coin hours
+ uxo.Body.Coins = _genCoins
+ uxo.Body.Hours = _genCoinHours
+ hours, err = uxo.CoinHours(uxo.Head.Time)
+ require.NoError(t, err)
+ require.Equal(t, hours, uxo.Body.Hours)
+
+ // One hour passed, using initial coin hours
+ hours, err = uxo.CoinHours(uxo.Head.Time + 3600)
+ require.NoError(t, err)
+ require.Equal(t, hours, uxo.Body.Hours+(_genCoins/1e6))
+
+ // No hours passed and no hours to begin with
+ uxo.Body.Hours = 0
+ hours, err = uxo.CoinHours(uxo.Head.Time)
+ require.NoError(t, err)
+ require.Equal(t, hours, uint64(0))
+
+ // Centuries have passed, time-based calculation overflows uint64
+ // when calculating the whole coin seconds
+ uxo.Body.Coins = 2e6
+ _, err = uxo.CoinHours(math.MaxUint64)
+ require.Error(t, err)
+ require.True(t, strings.HasPrefix(err.Error(), "UxOut.CoinHours: Calculating whole coin seconds overflows uint64 seconds=18446744073709551515 coins=2 uxid="))
+
+ // Centuries have passed, time-based calculation overflows uint64
+ // when calculating the droplet seconds
+ uxo.Body.Coins = 1e6 + 1e5
+ _, err = uxo.CoinHours(math.MaxUint64)
+ require.Error(t, err)
+ require.True(t, strings.HasPrefix(err.Error(), "UxOut.CoinHours: Calculating droplet seconds overflows uint64 seconds=18446744073709551515 droplets=100000 uxid="))
+
+ // Output would overflow if given more hours, has reached its limit
+ uxo.Body.Coins = 3600e6
+ uxo.Body.Hours = math.MaxUint64 - 1
+ _, err = uxo.CoinHours(uxo.Head.Time + 1000)
+ testutil.RequireError(t, err, ErrAddEarnedCoinHoursAdditionOverflow.Error())
+}
+
+func makeUxArray(t *testing.T, n int) UxArray {
+ uxa := make(UxArray, n)
+ for i := 0; i < len(uxa); i++ {
+ uxa[i] = makeUxOut(t)
+ }
+ return uxa
+}
+
+func TestUxArrayCoins(t *testing.T) {
+ uxa := makeUxArray(t, 4)
+
+ n, err := uxa.Coins()
+ require.NoError(t, err)
+ require.Equal(t, uint64(4e6), n)
+
+ uxa[2].Body.Coins = math.MaxUint64 - 1e6
+ _, err = uxa.Coins()
+ require.Equal(t, err, errors.New("UxArray.Coins addition overflow"))
+}
+
+func TestUxArrayCoinHours(t *testing.T) {
+ uxa := makeUxArray(t, 4)
+
+ n, err := uxa.CoinHours(uxa[0].Head.Time)
+ require.NoError(t, err)
+ require.Equal(t, uint64(400), n)
+
+ // 1 hour later
+ n, err = uxa.CoinHours(uxa[0].Head.Time + 3600)
+ require.NoError(t, err)
+ require.Equal(t, uint64(404), n)
+
+ // 1.5 hours later
+ n, err = uxa.CoinHours(uxa[0].Head.Time + 3600 + 1800)
+ require.NoError(t, err)
+ require.Equal(t, uint64(404), n)
+
+ // 2 hours later
+ n, err = uxa.CoinHours(uxa[0].Head.Time + 3600 + 4600)
+ require.NoError(t, err)
+ require.Equal(t, uint64(408), n)
+
+ uxa[2].Body.Hours = math.MaxUint64 - 100
+ _, err = uxa.CoinHours(uxa[0].Head.Time)
+ require.Equal(t, errors.New("UxArray.CoinHours addition overflow"), err)
+
+ _, err = uxa.CoinHours(uxa[0].Head.Time * 1000000000000)
+ require.Equal(t, ErrAddEarnedCoinHoursAdditionOverflow, err)
+}
+
+func TestUxArrayHashArray(t *testing.T) {
+ uxa := makeUxArray(t, 4)
+ hashes := uxa.Hashes()
+ assert.Equal(t, len(hashes), len(uxa))
+ for i, h := range hashes {
+ assert.Equal(t, h, uxa[i].Hash())
+ }
+}
+
+func TestUxArrayHasDupes(t *testing.T) {
+ uxa := makeUxArray(t, 4)
+ assert.False(t, uxa.HasDupes())
+ uxa[0] = uxa[1]
+ assert.True(t, uxa.HasDupes())
+}
+
+func TestUxArrayRemoveDupes(t *testing.T) {
+ uxa := makeUxArray(t, 4)
+ assert.False(t, uxa.HasDupes())
+ assert.Equal(t, uxa, uxa.removeDupes())
+ uxa[0] = uxa[1]
+ assert.True(t, uxa.HasDupes())
+ uxb := uxa.removeDupes()
+ assert.False(t, uxb.HasDupes())
+ assert.Equal(t, len(uxb), 3)
+ assert.Equal(t, uxb[0], uxa[0])
+ assert.Equal(t, uxb[1], uxa[2])
+ assert.Equal(t, uxb[2], uxa[3])
+}
+
+func TestUxArraySub(t *testing.T) {
+ uxa := makeUxArray(t, 4)
+ uxb := makeUxArray(t, 4)
+ uxc := append(uxa[:1], uxb...)
+ uxc = append(uxc, uxa[1:2]...)
+
+ uxd := uxc.Sub(uxa)
+ assert.Equal(t, uxd, uxb)
+
+ uxd = uxc.Sub(uxb)
+ assert.Equal(t, len(uxd), 2)
+ assert.Equal(t, uxd, uxa[:2])
+
+ // No intersection
+ uxd = uxa.Sub(uxb)
+ assert.Equal(t, uxa, uxd)
+ uxd = uxb.Sub(uxa)
+ assert.Equal(t, uxd, uxb)
+}
+
+func manualUxArrayIsSorted(uxa UxArray) bool {
+ isSorted := true
+ for i := 0; i < len(uxa)-1; i++ {
+ hi := uxa[i].Hash()
+ hj := uxa[i+1].Hash()
+ if bytes.Compare(hi[:], hj[:]) > 0 {
+ isSorted = false
+ }
+ }
+ return isSorted
+}
+
+func TestUxArraySorting(t *testing.T) {
+ uxa := make(UxArray, 4)
+ for i := 0; i < len(uxa); i++ {
+ uxa[i] = makeUxOut(t)
+ }
+ isSorted := manualUxArrayIsSorted(uxa)
+ assert.Equal(t, sort.IsSorted(uxa), isSorted)
+ // Make sure uxa is not sorted
+ if isSorted {
+ uxa[0], uxa[1] = uxa[1], uxa[0]
+ }
+ assert.False(t, manualUxArrayIsSorted(uxa))
+ assert.False(t, sort.IsSorted(uxa))
+ uxb := make(UxArray, 4)
+ for i, ux := range uxa {
+ uxb[i] = ux
+ }
+ sort.Sort(uxa)
+ assert.True(t, sort.IsSorted(uxa))
+ assert.True(t, manualUxArrayIsSorted(uxa))
+ assert.False(t, sort.IsSorted(uxb))
+ uxb.Sort()
+ assert.Equal(t, uxa, uxb)
+ assert.True(t, sort.IsSorted(uxb))
+ assert.True(t, manualUxArrayIsSorted(uxb))
+}
+
+func TestUxArrayLen(t *testing.T) {
+ uxa := make(UxArray, 4)
+ assert.Equal(t, len(uxa), uxa.Len())
+ assert.Equal(t, 4, uxa.Len())
+}
+
+func TestUxArrayLess(t *testing.T) {
+ uxa := make(UxArray, 2)
+ uxa[0] = makeUxOut(t)
+ uxa[1] = makeUxOut(t)
+ h := make([]cipher.SHA256, 2)
+ h[0] = uxa[0].Hash()
+ h[1] = uxa[1].Hash()
+ assert.Equal(t, uxa.Less(0, 1), bytes.Compare(h[0][:], h[1][:]) < 0)
+ assert.Equal(t, uxa.Less(1, 0), bytes.Compare(h[0][:], h[1][:]) > 0)
+}
+
+func TestUxArraySwap(t *testing.T) {
+ uxa := make(UxArray, 2)
+ uxx := makeUxOut(t)
+ uxy := makeUxOut(t)
+ uxa[0] = uxx
+ uxa[1] = uxy
+ uxa.Swap(0, 1)
+ assert.Equal(t, uxa[0], uxy)
+ assert.Equal(t, uxa[1], uxx)
+ uxa.Swap(0, 1)
+ assert.Equal(t, uxa[0], uxx)
+ assert.Equal(t, uxa[1], uxy)
+ uxa.Swap(1, 0)
+ assert.Equal(t, uxa[1], uxx)
+ assert.Equal(t, uxa[0], uxy)
+}
+
+func TestAddressUxOutsKeys(t *testing.T) {
+ unspents := make(AddressUxOuts)
+ ux := makeUxOut(t)
+ ux2 := makeUxOut(t)
+ ux3 := makeUxOut(t)
+ unspents[ux.Body.Address] = UxArray{ux}
+ unspents[ux2.Body.Address] = UxArray{ux2}
+ unspents[ux3.Body.Address] = UxArray{ux3}
+ keys := unspents.Keys()
+ assert.Equal(t, len(keys), 3)
+ dupes := make(map[cipher.Address]byte, 3)
+ for _, k := range keys {
+ dupes[k] = byte(1)
+ assert.True(t, k == ux.Body.Address || k == ux2.Body.Address || k == ux3.Body.Address)
+ }
+ assert.Equal(t, len(keys), len(dupes))
+}
+
+func TestAddressUxOutsMerge(t *testing.T) {
+ unspents := make(AddressUxOuts)
+ unspents2 := make(AddressUxOuts)
+ ux := makeUxOut(t)
+ ux2 := makeUxOut(t)
+ ux3 := makeUxOut(t)
+ ux4 := makeUxOut(t)
+ ux3.Body.Address = ux.Body.Address
+
+ unspents[ux.Body.Address] = UxArray{ux}
+ unspents[ux2.Body.Address] = UxArray{ux2}
+ unspents2[ux3.Body.Address] = UxArray{ux3}
+ unspents2[ux4.Body.Address] = UxArray{ux4}
+
+ // Valid merge
+ keys := []cipher.Address{ux.Body.Address, ux2.Body.Address, ux4.Body.Address}
+ merged := unspents.Merge(unspents2, keys)
+ assert.Equal(t, len(unspents), 2)
+ assert.Equal(t, len(unspents2), 2)
+ assert.Equal(t, len(merged), 3)
+ assert.Equal(t, merged[ux.Body.Address], UxArray{ux, ux3})
+ assert.Equal(t, merged[ux2.Body.Address], UxArray{ux2})
+ assert.Equal(t, merged[ux4.Body.Address], UxArray{ux4})
+
+ // Duplicates should not be merged
+ unspents[ux4.Body.Address] = UxArray{ux4}
+ unspents[ux.Body.Address] = UxArray{ux, ux3}
+ merged = unspents.Merge(unspents2, keys)
+ assert.Equal(t, len(merged), 3)
+ assert.Equal(t, merged[ux.Body.Address], UxArray{ux, ux3})
+ assert.Equal(t, merged[ux2.Body.Address], UxArray{ux2})
+ assert.Equal(t, merged[ux4.Body.Address], UxArray{ux4})
+
+ // Missing keys should not be merged
+ merged = unspents.Merge(unspents2, []cipher.Address{})
+ assert.Equal(t, len(merged), 0)
+ merged = unspents.Merge(unspents2, []cipher.Address{ux4.Body.Address})
+ assert.Equal(t, len(merged), 1)
+ assert.Equal(t, merged[ux4.Body.Address], UxArray{ux4})
+}
+
+func TestAddressUxOutsSub(t *testing.T) {
+ up := make(AddressUxOuts)
+ up2 := make(AddressUxOuts)
+ uxs := makeUxArray(t, 4)
+
+ uxs[1].Body.Address = uxs[0].Body.Address
+ up[uxs[0].Body.Address] = UxArray{uxs[0], uxs[1]}
+ up[uxs[2].Body.Address] = UxArray{uxs[2]}
+ up[uxs[3].Body.Address] = UxArray{uxs[3]}
+
+ up2[uxs[0].Body.Address] = UxArray{uxs[0]}
+ up2[uxs[2].Body.Address] = UxArray{uxs[2]}
+
+ up3 := up.Sub(up2)
+ // One address should have been removed, because no elements
+ assert.Equal(t, len(up3), 2)
+ _, ok := up3[uxs[2].Body.Address]
+ assert.False(t, ok)
+ // Ux3 should be untouched
+ ux3 := up3[uxs[3].Body.Address]
+ assert.Equal(t, ux3, UxArray{uxs[3]})
+ // Ux0,Ux1 should be missing Ux0
+ ux1 := up3[uxs[0].Body.Address]
+ assert.Equal(t, ux1, UxArray{uxs[1]})
+
+ // Originals should be unmodified
+ assert.Equal(t, len(up), 3)
+ assert.Equal(t, len(up[uxs[0].Body.Address]), 2)
+ assert.Equal(t, len(up[uxs[2].Body.Address]), 1)
+ assert.Equal(t, len(up[uxs[3].Body.Address]), 1)
+ assert.Equal(t, len(up2), 2)
+ assert.Equal(t, len(up2[uxs[0].Body.Address]), 1)
+ assert.Equal(t, len(up2[uxs[2].Body.Address]), 1)
+}
+
+func TestAddressUxOutsAdd(t *testing.T) {
+ up := make(AddressUxOuts)
+ up2 := make(AddressUxOuts)
+ uxs := makeUxArray(t, 4)
+
+ uxs[1].Body.Address = uxs[0].Body.Address
+ up[uxs[0].Body.Address] = UxArray{uxs[0]}
+ up[uxs[2].Body.Address] = UxArray{uxs[2]}
+ up[uxs[3].Body.Address] = UxArray{uxs[3]}
+
+ up2[uxs[0].Body.Address] = UxArray{uxs[1]}
+ up2[uxs[2].Body.Address] = UxArray{uxs[2]}
+
+ up3 := up.Add(up2)
+ require.Equal(t, 3, len(up3))
+ require.Equal(t, len(up3[uxs[0].Body.Address]), 2)
+ require.Equal(t, up3[uxs[0].Body.Address], UxArray{uxs[0], uxs[1]})
+ require.Equal(t, up3[uxs[2].Body.Address], UxArray{uxs[2]})
+ require.Equal(t, up3[uxs[3].Body.Address], UxArray{uxs[3]})
+ require.Equal(t, up3[uxs[1].Body.Address], UxArray{uxs[0], uxs[1]})
+
+ // Originals should be unmodified
+ assert.Equal(t, len(up), 3)
+ assert.Equal(t, len(up[uxs[0].Body.Address]), 1)
+ assert.Equal(t, len(up[uxs[2].Body.Address]), 1)
+ assert.Equal(t, len(up[uxs[3].Body.Address]), 1)
+ assert.Equal(t, len(up2), 2)
+ assert.Equal(t, len(up2[uxs[0].Body.Address]), 1)
+ assert.Equal(t, len(up2[uxs[2].Body.Address]), 1)
+}
+
+func TestAddressUxOutsFlatten(t *testing.T) {
+ up := make(AddressUxOuts)
+ uxs := makeUxArray(t, 3)
+ uxs[2].Body.Address = uxs[1].Body.Address
+ emptyAddr := makeAddress()
+
+ // An empty array
+ up[emptyAddr] = UxArray{}
+ // 1 element array
+ up[uxs[0].Body.Address] = UxArray{uxs[0]}
+ // 2 element array
+ up[uxs[1].Body.Address] = UxArray{uxs[1], uxs[2]}
+
+ flat := up.Flatten()
+ assert.Equal(t, len(flat), 3)
+ // emptyAddr should not be in the array
+ for _, ux := range flat {
+ assert.NotEqual(t, ux.Body.Address, emptyAddr)
+ }
+ if flat[0].Body.Address == uxs[0].Body.Address {
+ assert.Equal(t, flat[0], uxs[0])
+ assert.Equal(t, flat[0].Body.Address, uxs[0].Body.Address)
+ assert.Equal(t, flat[0+1], uxs[1])
+ assert.Equal(t, flat[1+1], uxs[2])
+ assert.Equal(t, flat[0+1].Body.Address, uxs[1].Body.Address)
+ assert.Equal(t, flat[1+1].Body.Address, uxs[2].Body.Address)
+ } else {
+ assert.Equal(t, flat[0], uxs[1])
+ assert.Equal(t, flat[1], uxs[2])
+ assert.Equal(t, flat[0].Body.Address, uxs[1].Body.Address)
+ assert.Equal(t, flat[1].Body.Address, uxs[2].Body.Address)
+ assert.Equal(t, flat[2], uxs[0])
+ assert.Equal(t, flat[2].Body.Address, uxs[0].Body.Address)
+ }
+}
+
+func TestNewAddressUxOuts(t *testing.T) {
+ uxs := makeUxArray(t, 6)
+ uxs[1].Body.Address = uxs[0].Body.Address
+ uxs[3].Body.Address = uxs[2].Body.Address
+ uxs[4].Body.Address = uxs[2].Body.Address
+ uxo := NewAddressUxOuts(uxs)
+ assert.Equal(t, len(uxo), 3)
+ assert.Equal(t, uxo[uxs[0].Body.Address], UxArray{
+ uxs[0], uxs[1],
+ })
+ assert.Equal(t, uxo[uxs[3].Body.Address], UxArray{
+ uxs[2], uxs[3], uxs[4],
+ })
+ assert.Equal(t, uxo[uxs[5].Body.Address], UxArray{
+ uxs[5],
+ })
+}
+
+/*
+ Utility Functions
+*/
+
+// Returns a copy of self with duplicates removed
+// Is this needed?
+func (ua UxArray) removeDupes() UxArray {
+ m := make(UxHashSet, len(ua))
+ deduped := make(UxArray, 0, len(ua))
+ for i := range ua {
+ h := ua[i].Hash()
+ if _, ok := m[h]; !ok {
+ deduped = append(deduped, ua[i])
+ m[h] = struct{}{}
+ }
+ }
+ return deduped
+}
+
+// Combines two AddressUxOuts where they overlap with keys
+// Remove?
+func (auo AddressUxOuts) Merge(other AddressUxOuts,
+ keys []cipher.Address) AddressUxOuts {
+ final := make(AddressUxOuts, len(keys))
+ for _, a := range keys {
+ row := append(auo[a], other[a]...)
+ final[a] = row.removeDupes()
+ }
+ return final
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go b/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go
new file mode 100644
index 0000000..29c5e62
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go
@@ -0,0 +1,953 @@
+package coin
+
+import (
+ "bytes"
+ "errors"
+ "math"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+ "github.com/skycoin/skycoin/src/testutil"
+ _require "github.com/skycoin/skycoin/src/testutil/require"
+)
+
+func makeTransactionFromUxOut(ux UxOut, s cipher.SecKey) Transaction {
+ tx := Transaction{}
+ tx.PushInput(ux.Hash())
+ tx.PushOutput(makeAddress(), 1e6, 50)
+ tx.PushOutput(makeAddress(), 5e6, 50)
+ tx.SignInputs([]cipher.SecKey{s})
+ tx.UpdateHeader()
+ return tx
+}
+
+func makeTransaction(t *testing.T) Transaction {
+ ux, s := makeUxOutWithSecret(t)
+ return makeTransactionFromUxOut(ux, s)
+}
+
+func makeTransactions(t *testing.T, n int) Transactions { // nolint: unparam
+ txns := make(Transactions, n)
+ for i := range txns {
+ txns[i] = makeTransaction(t)
+ }
+ return txns
+}
+
+func makeAddress() cipher.Address {
+ p, _ := cipher.GenerateKeyPair()
+ return cipher.AddressFromPubKey(p)
+}
+
+func copyTransaction(tx Transaction) Transaction {
+ txo := Transaction{}
+ txo.Length = tx.Length
+ txo.Type = tx.Type
+ txo.InnerHash = tx.InnerHash
+ txo.Sigs = make([]cipher.Sig, len(tx.Sigs))
+ copy(txo.Sigs, tx.Sigs)
+ txo.In = make([]cipher.SHA256, len(tx.In))
+ copy(txo.In, tx.In)
+ txo.Out = make([]TransactionOutput, len(tx.Out))
+ copy(txo.Out, tx.Out)
+ return txo
+}
+
+func TestTransactionVerify(t *testing.T) {
+ // Mismatch header hash
+ tx := makeTransaction(t)
+ tx.InnerHash = cipher.SHA256{}
+ testutil.RequireError(t, tx.Verify(), "Invalid header hash")
+
+ // No inputs
+ tx = makeTransaction(t)
+ tx.In = make([]cipher.SHA256, 0)
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "No inputs")
+
+ // No outputs
+ tx = makeTransaction(t)
+ tx.Out = make([]TransactionOutput, 0)
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "No outputs")
+
+ // Invalid number of sigs
+ tx = makeTransaction(t)
+ tx.Sigs = make([]cipher.Sig, 0)
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Invalid number of signatures")
+ tx.Sigs = make([]cipher.Sig, 20)
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Invalid number of signatures")
+
+ // Too many sigs & inputs
+ tx = makeTransaction(t)
+ tx.Sigs = make([]cipher.Sig, math.MaxUint16)
+ tx.In = make([]cipher.SHA256, math.MaxUint16)
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Too many signatures and inputs")
+
+ // Duplicate inputs
+ ux, s := makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ tx.PushInput(tx.In[0])
+ tx.Sigs = nil
+ tx.SignInputs([]cipher.SecKey{s, s})
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Duplicate spend")
+
+ // Duplicate outputs
+ tx = makeTransaction(t)
+ to := tx.Out[0]
+ tx.PushOutput(to.Address, to.Coins, to.Hours)
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Duplicate output in transaction")
+
+ // Invalid signature, empty
+ tx = makeTransaction(t)
+ tx.Sigs[0] = cipher.Sig{}
+ testutil.RequireError(t, tx.Verify(), "Failed to recover public key")
+ // We can't check here for other invalid signatures:
+ // - Signatures signed by someone else, spending coins they don't own
+ // - Signature is for wrong hash
+ // This must be done by blockchain tests, because we need the address
+ // from the unspent being spent
+
+ // Output coins are 0
+ tx = makeTransaction(t)
+ tx.Out[0].Coins = 0
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Zero coin output")
+
+ // Output coin overflow
+ tx = makeTransaction(t)
+ tx.Out[0].Coins = math.MaxUint64 - 3e6
+ tx.UpdateHeader()
+ testutil.RequireError(t, tx.Verify(), "Output coins overflow")
+
+ // Output coins are not multiples of 1e6 (valid, decimal restriction is not enforced here)
+ tx = makeTransaction(t)
+ tx.Out[0].Coins += 10
+ tx.UpdateHeader()
+ tx.Sigs = nil
+ tx.SignInputs([]cipher.SecKey{genSecret})
+ require.NotEqual(t, tx.Out[0].Coins%1e6, uint64(0))
+ require.NoError(t, tx.Verify())
+
+ // Valid
+ tx = makeTransaction(t)
+ tx.Out[0].Coins = 10e6
+ tx.Out[1].Coins = 1e6
+ tx.UpdateHeader()
+ require.Nil(t, tx.Verify())
+}
+
+func TestTransactionVerifyInput(t *testing.T) {
+ // Invalid uxIn args
+ tx := makeTransaction(t)
+ _require.PanicsWithLogMessage(t, "tx.In != uxIn", func() {
+ tx.VerifyInput(nil)
+ })
+ _require.PanicsWithLogMessage(t, "tx.In != uxIn", func() {
+ tx.VerifyInput(UxArray{})
+ })
+ _require.PanicsWithLogMessage(t, "tx.In != uxIn", func() {
+ tx.VerifyInput(make(UxArray, 3))
+ })
+
+ // tx.In != tx.Sigs
+ ux, s := makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ tx.Sigs = []cipher.Sig{}
+ _require.PanicsWithLogMessage(t, "tx.In != tx.Sigs", func() {
+ tx.VerifyInput(UxArray{ux})
+ })
+
+ ux, s = makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ tx.Sigs = append(tx.Sigs, cipher.Sig{})
+ _require.PanicsWithLogMessage(t, "tx.In != tx.Sigs", func() {
+ tx.VerifyInput(UxArray{ux})
+ })
+
+ // tx.InnerHash != tx.HashInner()
+ ux, s = makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ tx.InnerHash = cipher.SHA256{}
+ _require.PanicsWithLogMessage(t, "Invalid Tx Inner Hash", func() {
+ tx.VerifyInput(UxArray{ux})
+ })
+
+ // tx.In does not match uxIn hashes
+ ux, s = makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ _require.PanicsWithLogMessage(t, "Ux hash mismatch", func() {
+ tx.VerifyInput(UxArray{UxOut{}})
+ })
+
+ // Invalid signature
+ ux, s = makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ tx.Sigs[0] = cipher.Sig{}
+ err := tx.VerifyInput(UxArray{ux})
+ testutil.RequireError(t, err, "Signature not valid for output being spent")
+
+ // Valid
+ ux, s = makeUxOutWithSecret(t)
+ tx = makeTransactionFromUxOut(ux, s)
+ err = tx.VerifyInput(UxArray{ux})
+ require.NoError(t, err)
+}
+
+func TestTransactionPushInput(t *testing.T) {
+ tx := &Transaction{}
+ ux := makeUxOut(t)
+ require.Equal(t, tx.PushInput(ux.Hash()), uint16(0))
+ require.Equal(t, len(tx.In), 1)
+ require.Equal(t, tx.In[0], ux.Hash())
+ tx.In = append(tx.In, make([]cipher.SHA256, math.MaxUint16)...)
+ ux = makeUxOut(t)
+ require.Panics(t, func() { tx.PushInput(ux.Hash()) })
+}
+
+func TestTransactionPushOutput(t *testing.T) {
+ tx := &Transaction{}
+ a := makeAddress()
+ tx.PushOutput(a, 100, 150)
+ require.Equal(t, len(tx.Out), 1)
+ require.Equal(t, tx.Out[0], TransactionOutput{
+ Address: a,
+ Coins: 100,
+ Hours: 150,
+ })
+ for i := 1; i < 20; i++ {
+ a := makeAddress()
+ tx.PushOutput(a, uint64(i*100), uint64(i*50))
+ require.Equal(t, len(tx.Out), i+1)
+ require.Equal(t, tx.Out[i], TransactionOutput{
+ Address: a,
+ Coins: uint64(i * 100),
+ Hours: uint64(i * 50),
+ })
+ }
+}
+
+func TestTransactionSignInputs(t *testing.T) {
+ tx := &Transaction{}
+ // Panics if txns already signed
+ tx.Sigs = append(tx.Sigs, cipher.Sig{})
+ require.Panics(t, func() { tx.SignInputs([]cipher.SecKey{}) })
+ // Panics if not enough keys
+ tx = &Transaction{}
+ ux, s := makeUxOutWithSecret(t)
+ tx.PushInput(ux.Hash())
+ ux2, s2 := makeUxOutWithSecret(t)
+ tx.PushInput(ux2.Hash())
+ tx.PushOutput(makeAddress(), 40, 80)
+ require.Equal(t, len(tx.Sigs), 0)
+ require.Panics(t, func() { tx.SignInputs([]cipher.SecKey{s}) })
+ require.Equal(t, len(tx.Sigs), 0)
+ // Valid signing
+ h := tx.HashInner()
+ require.NotPanics(t, func() { tx.SignInputs([]cipher.SecKey{s, s2}) })
+ require.Equal(t, len(tx.Sigs), 2)
+ require.Equal(t, tx.HashInner(), h)
+ p := cipher.PubKeyFromSecKey(s)
+ a := cipher.AddressFromPubKey(p)
+ p = cipher.PubKeyFromSecKey(s2)
+ a2 := cipher.AddressFromPubKey(p)
+ require.Nil(t, cipher.ChkSig(a, cipher.AddSHA256(h, tx.In[0]), tx.Sigs[0]))
+ require.Nil(t, cipher.ChkSig(a2, cipher.AddSHA256(h, tx.In[1]), tx.Sigs[1]))
+ require.NotNil(t, cipher.ChkSig(a, h, tx.Sigs[1]))
+ require.NotNil(t, cipher.ChkSig(a2, h, tx.Sigs[0]))
+}
+
+func TestTransactionHash(t *testing.T) {
+ tx := makeTransaction(t)
+ require.NotEqual(t, tx.Hash(), cipher.SHA256{})
+ require.NotEqual(t, tx.HashInner(), tx.Hash())
+}
+
+func TestTransactionUpdateHeader(t *testing.T) {
+ tx := makeTransaction(t)
+ h := tx.InnerHash
+ tx.InnerHash = cipher.SHA256{}
+ tx.UpdateHeader()
+ require.NotEqual(t, tx.InnerHash, cipher.SHA256{})
+ require.Equal(t, tx.InnerHash, h)
+ require.Equal(t, tx.InnerHash, tx.HashInner())
+}
+
+func TestTransactionHashInner(t *testing.T) {
+ tx := makeTransaction(t)
+
+ h := tx.HashInner()
+ require.NotEqual(t, h, cipher.SHA256{})
+
+ // If tx.In is changed, hash should change
+ tx2 := copyTransaction(tx)
+ ux := makeUxOut(t)
+ tx2.In[0] = ux.Hash()
+ require.NotEqual(t, tx, tx2)
+ require.Equal(t, tx2.In[0], ux.Hash())
+ require.NotEqual(t, tx.HashInner(), tx2.HashInner())
+
+ // If tx.Out is changed, hash should change
+ tx2 = copyTransaction(tx)
+ a := makeAddress()
+ tx2.Out[0].Address = a
+ require.NotEqual(t, tx, tx2)
+ require.Equal(t, tx2.Out[0].Address, a)
+ require.NotEqual(t, tx.HashInner(), tx2.HashInner())
+
+ // If tx.Head is changed, hash should not change
+ tx2 = copyTransaction(tx)
+ tx.Sigs = append(tx.Sigs, cipher.Sig{})
+ require.Equal(t, tx.HashInner(), tx2.HashInner())
+}
+
+func TestTransactionSerialization(t *testing.T) {
+ tx := makeTransaction(t)
+ b := tx.Serialize()
+ tx2, err := TransactionDeserialize(b)
+ require.NoError(t, err)
+ require.Equal(t, tx, tx2)
+ // Invalid deserialization
+ require.Panics(t, func() { MustTransactionDeserialize([]byte{0x04}) })
+}
+
+func TestTransactionOutputHours(t *testing.T) {
+ tx := Transaction{}
+ tx.PushOutput(makeAddress(), 1e6, 100)
+ tx.PushOutput(makeAddress(), 1e6, 200)
+ tx.PushOutput(makeAddress(), 1e6, 500)
+ tx.PushOutput(makeAddress(), 1e6, 0)
+ hours, err := tx.OutputHours()
+ require.NoError(t, err)
+ require.Equal(t, hours, uint64(800))
+
+ tx.PushOutput(makeAddress(), 1e6, math.MaxUint64-700)
+ _, err = tx.OutputHours()
+ testutil.RequireError(t, err, "Transaction output hours overflow")
+}
+
+type outAddr struct {
+ Addr cipher.Address
+ Coins uint64
+ Hours uint64
+}
+
+func makeTx(s cipher.SecKey, ux *UxOut, outs []outAddr, tm uint64, seq uint64) (*Transaction, UxArray, error) {
+ if ux == nil {
+ // genesis block tx.
+ tx := Transaction{}
+ tx.PushOutput(outs[0].Addr, outs[0].Coins, outs[0].Hours)
+ _, s = cipher.GenerateKeyPair()
+ ux := UxOut{
+ Head: UxHead{
+ Time: 100,
+ BkSeq: 0,
+ },
+ Body: UxBody{
+ SrcTransaction: tx.InnerHash,
+ Address: outs[0].Addr,
+ Coins: outs[0].Coins,
+ Hours: outs[0].Hours,
+ },
+ }
+ return &tx, []UxOut{ux}, nil
+ }
+
+ tx := Transaction{}
+ tx.PushInput(ux.Hash())
+ tx.SignInputs([]cipher.SecKey{s})
+ for _, o := range outs {
+ tx.PushOutput(o.Addr, o.Coins, o.Hours)
+ }
+ tx.UpdateHeader()
+
+ uxo := make(UxArray, len(tx.Out))
+ for i := range tx.Out {
+ uxo[i] = UxOut{
+ Head: UxHead{
+ Time: tm,
+ BkSeq: seq,
+ },
+ Body: UxBody{
+ SrcTransaction: tx.Hash(),
+ Address: tx.Out[i].Address,
+ Coins: tx.Out[i].Coins,
+ Hours: tx.Out[i].Hours,
+ },
+ }
+ }
+ return &tx, uxo, nil
+}
+
+func TestTransactionsSize(t *testing.T) {
+ txns := makeTransactions(t, 10)
+ size := 0
+ for _, tx := range txns {
+ size += len(encoder.Serialize(&tx))
+ }
+ require.NotEqual(t, size, 0)
+ require.Equal(t, txns.Size(), size)
+}
+
+func TestTransactionsHashes(t *testing.T) {
+ txns := make(Transactions, 4)
+ for i := 0; i < len(txns); i++ {
+ txns[i] = makeTransaction(t)
+ }
+ hashes := txns.Hashes()
+ require.Equal(t, len(hashes), 4)
+ for i, h := range hashes {
+ require.Equal(t, h, txns[i].Hash())
+ }
+}
+
+func TestTransactionsTruncateBytesTo(t *testing.T) {
+ txns := makeTransactions(t, 10)
+ trunc := 0
+ for i := 0; i < len(txns)/2; i++ {
+ trunc += txns[i].Size()
+ }
+ // Truncating halfway
+ txns2 := txns.TruncateBytesTo(trunc)
+ require.Equal(t, len(txns2), len(txns)/2)
+ require.Equal(t, txns2.Size(), trunc)
+
+ // Stepping into next boundary has same cutoff, must exceed
+ trunc++
+ txns2 = txns.TruncateBytesTo(trunc)
+ require.Equal(t, len(txns2), len(txns)/2)
+ require.Equal(t, txns2.Size(), trunc-1)
+
+ // Moving to 1 before next level
+ trunc += txns[5].Size() - 2
+ txns2 = txns.TruncateBytesTo(trunc)
+ require.Equal(t, len(txns2), len(txns)/2)
+ require.Equal(t, txns2.Size(), trunc-txns[5].Size()+1)
+
+ // Moving to next level
+ trunc++
+ txns2 = txns.TruncateBytesTo(trunc)
+ require.Equal(t, len(txns2), len(txns)/2+1)
+ require.Equal(t, txns2.Size(), trunc)
+
+ // Truncating to full available amt
+ trunc = txns.Size()
+ txns2 = txns.TruncateBytesTo(trunc)
+ require.Equal(t, txns, txns2)
+ require.Equal(t, txns2.Size(), trunc)
+
+ // Truncating over amount
+ trunc++
+ txns2 = txns.TruncateBytesTo(trunc)
+ require.Equal(t, txns, txns2)
+ require.Equal(t, txns2.Size(), trunc-1)
+
+ // Truncating to 0
+ trunc = 0
+ txns2 = txns.TruncateBytesTo(0)
+ require.Equal(t, len(txns2), 0)
+ require.Equal(t, txns2.Size(), trunc)
+}
+
+func TestVerifyTransactionCoinsSpending(t *testing.T) {
+ // Input coins overflow
+ // Insufficient coins
+ // Destroy coins
+
+ type ux struct {
+ coins uint64
+ hours uint64
+ }
+
+ cases := []struct {
+ name string
+ inUxs []ux
+ outUxs []ux
+ err error
+ }{
+ {
+ name: "Input coins overflow",
+ inUxs: []ux{
+ {
+ coins: math.MaxUint64 - 1e6 + 1,
+ hours: 10,
+ },
+ {
+ coins: 1e6,
+ hours: 0,
+ },
+ },
+ err: errors.New("Transaction input coins overflow"),
+ },
+
+ {
+ name: "Output coins overflow",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: math.MaxUint64 - 10e6 + 1,
+ hours: 0,
+ },
+ {
+ coins: 20e6,
+ hours: 1,
+ },
+ },
+ err: errors.New("Transaction output coins overflow"),
+ },
+
+ {
+ name: "Insufficient coins",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 20e6,
+ hours: 1,
+ },
+ {
+ coins: 10e6,
+ hours: 1,
+ },
+ },
+ err: errors.New("Insufficient coins"),
+ },
+
+ {
+ name: "Destroyed coins",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 5e6,
+ hours: 1,
+ },
+ {
+ coins: 10e6,
+ hours: 1,
+ },
+ },
+ err: errors.New("Transactions may not destroy coins"),
+ },
+
+ {
+ name: "valid",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 11,
+ },
+ {
+ coins: 10e6,
+ hours: 1,
+ },
+ {
+ coins: 5e6,
+ hours: 0,
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ var uxIn, uxOut UxArray
+
+ for _, ch := range tc.inUxs {
+ uxIn = append(uxIn, UxOut{
+ Body: UxBody{
+ Coins: ch.coins,
+ Hours: ch.hours,
+ },
+ })
+ }
+
+ for _, ch := range tc.outUxs {
+ uxOut = append(uxOut, UxOut{
+ Body: UxBody{
+ Coins: ch.coins,
+ Hours: ch.hours,
+ },
+ })
+ }
+
+ err := VerifyTransactionCoinsSpending(uxIn, uxOut)
+ require.Equal(t, tc.err, err)
+ })
+ }
+}
+
+func TestVerifyTransactionHoursSpending(t *testing.T) {
+ // Input hours overflow
+ // Insufficient hours
+ // NOTE: does not check for hours overflow, that had to be moved to soft constraints
+ // NOTE: if uxIn.CoinHours() fails during the addition of earned hours to base hours,
+ // the error is ignored and treated as 0 hours
+
+ type ux struct {
+ coins uint64
+ hours uint64
+ }
+
+ cases := []struct {
+ name string
+ inUxs []ux
+ outUxs []ux
+ headTime uint64
+ err string
+ }{
+ {
+ name: "Input hours overflow",
+ inUxs: []ux{
+ {
+ coins: 3e6,
+ hours: math.MaxUint64 - 1e6 + 1,
+ },
+ {
+ coins: 1e6,
+ hours: 1e6,
+ },
+ },
+ err: "Transaction input hours overflow",
+ },
+
+ {
+ name: "Insufficient coin hours",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ {
+ coins: 10e6,
+ hours: 11,
+ },
+ },
+ err: "Insufficient coin hours",
+ },
+
+ {
+ name: "coin hours time calculation overflow",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 11,
+ },
+ {
+ coins: 10e6,
+ hours: 1,
+ },
+ {
+ coins: 5e6,
+ hours: 0,
+ },
+ },
+ headTime: math.MaxUint64,
+ err: "UxOut.CoinHours: Calculating whole coin seconds overflows uint64 seconds=18446744073709551615 coins=10 uxid=",
+ },
+
+ {
+			name:     "Invalid (coin hours overflow when adding earned hours, which is treated as 0, and not enough coin hours)",
+ headTime: 1e6,
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: math.MaxUint64,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 1,
+ },
+ },
+ err: "Insufficient coin hours",
+ },
+
+ {
+ name: "Valid (coin hours overflow when adding earned hours, which is treated as 0, but not sending any hours)",
+ headTime: 1e6,
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: math.MaxUint64,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 0,
+ },
+ },
+ },
+
+ {
+ name: "Valid (base inputs have insufficient coin hours, but have sufficient after adjusting coinhours by headTime)",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ {
+ coins: 10e6,
+ hours: 11,
+ },
+ },
+ headTime: 1492707255,
+ },
+
+ {
+ name: "valid",
+ inUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 10,
+ },
+ {
+ coins: 15e6,
+ hours: 10,
+ },
+ },
+ outUxs: []ux{
+ {
+ coins: 10e6,
+ hours: 11,
+ },
+ {
+ coins: 10e6,
+ hours: 1,
+ },
+ {
+ coins: 5e6,
+ hours: 0,
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ var uxIn, uxOut UxArray
+
+ for _, ch := range tc.inUxs {
+ uxIn = append(uxIn, UxOut{
+ Body: UxBody{
+ Coins: ch.coins,
+ Hours: ch.hours,
+ },
+ })
+ }
+
+ for _, ch := range tc.outUxs {
+ uxOut = append(uxOut, UxOut{
+ Body: UxBody{
+ Coins: ch.coins,
+ Hours: ch.hours,
+ },
+ })
+ }
+
+ err := VerifyTransactionHoursSpending(tc.headTime, uxIn, uxOut)
+ if tc.err == "" {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ require.True(t, strings.HasPrefix(err.Error(), tc.err))
+ }
+ })
+ }
+}
+
+func TestTransactionsFees(t *testing.T) {
+ calc := func(tx *Transaction) (uint64, error) {
+ return 1, nil
+ }
+
+ var txns Transactions
+
+ // Nil txns
+ fee, err := txns.Fees(calc)
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), fee)
+
+ txns = append(txns, Transaction{})
+ txns = append(txns, Transaction{})
+
+ // 2 transactions, calc() always returns 1
+ fee, err = txns.Fees(calc)
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), fee)
+
+ // calc error
+ failingCalc := func(tx *Transaction) (uint64, error) {
+ return 0, errors.New("bad calc")
+ }
+ _, err = txns.Fees(failingCalc)
+ testutil.RequireError(t, err, "bad calc")
+
+ // summing of calculated fees overflows
+ overflowCalc := func(tx *Transaction) (uint64, error) {
+ return math.MaxUint64, nil
+ }
+
+ _, err = txns.Fees(overflowCalc)
+ testutil.RequireError(t, err, "Transactions fee totals overflow")
+}
+
+func TestSortTransactions(t *testing.T) {
+ n := 6
+ var txns Transactions
+ for i := 0; i < n; i++ {
+ txn := Transaction{}
+ txn.PushOutput(makeAddress(), 1e6, uint64(i*1e3))
+ txn.UpdateHeader()
+ txns = append(txns, txn)
+ }
+
+ var hashSortedTxns Transactions
+ for _, txn := range txns {
+ hashSortedTxns = append(hashSortedTxns, txn)
+ }
+
+ sort.Slice(hashSortedTxns, func(i, j int) bool {
+ ihash := hashSortedTxns[i].Hash()
+ jhash := hashSortedTxns[j].Hash()
+ return bytes.Compare(ihash[:], jhash[:]) < 0
+ })
+
+ cases := []struct {
+ name string
+ feeCalc FeeCalculator
+ txns Transactions
+ sortedTxns Transactions
+ }{
+ {
+ name: "already sorted",
+ txns: Transactions{txns[0], txns[1]},
+ sortedTxns: Transactions{txns[0], txns[1]},
+ feeCalc: func(txn *Transaction) (uint64, error) {
+ return 1e8 - txn.Out[0].Hours, nil
+ },
+ },
+
+ {
+ name: "reverse sorted",
+ txns: Transactions{txns[1], txns[0]},
+ sortedTxns: Transactions{txns[0], txns[1]},
+ feeCalc: func(txn *Transaction) (uint64, error) {
+ return 1e8 - txn.Out[0].Hours, nil
+ },
+ },
+
+ {
+ name: "hash tiebreaker",
+ txns: Transactions{hashSortedTxns[1], hashSortedTxns[0]},
+ sortedTxns: Transactions{hashSortedTxns[0], hashSortedTxns[1]},
+ feeCalc: func(txn *Transaction) (uint64, error) {
+ return 1e8, nil
+ },
+ },
+
+ {
+ name: "invalid fee multiplication is capped",
+ txns: Transactions{txns[1], txns[2], txns[0]},
+ sortedTxns: Transactions{txns[2], txns[0], txns[1]},
+ feeCalc: func(txn *Transaction) (uint64, error) {
+ if txn.Hash() == txns[2].Hash() {
+ return math.MaxUint64 / 2, nil
+ }
+ return 1e8 - txn.Out[0].Hours, nil
+ },
+ },
+
+ {
+ name: "failed fee calc is filtered",
+ txns: Transactions{txns[1], txns[2], txns[0]},
+ sortedTxns: Transactions{txns[0], txns[1]},
+ feeCalc: func(txn *Transaction) (uint64, error) {
+ if txn.Hash() == txns[2].Hash() {
+ return 0, errors.New("fee calc failed")
+ }
+ return 1e8 - txn.Out[0].Hours, nil
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ txns := SortTransactions(tc.txns, tc.feeCalc)
+ require.Equal(t, tc.sortedTxns, txns)
+ })
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/args_test.go b/vendor/github.com/spf13/cobra/args_test.go
new file mode 100644
index 0000000..d797b6f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args_test.go
@@ -0,0 +1,241 @@
+package cobra
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestNoArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: NoArgs, Run: emptyRun}
+
+ output, err := executeCommand(c)
+ if output != "" {
+ t.Errorf("Unexpected string: %v", output)
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+}
+
+func TestNoArgsWithArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: NoArgs, Run: emptyRun}
+
+ _, err := executeCommand(c, "illegal")
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := `unknown command "illegal" for "c"`
+ if got != expected {
+ t.Errorf("Expected: %q, got: %q", expected, got)
+ }
+}
+
+func TestOnlyValidArgs(t *testing.T) {
+ c := &Command{
+ Use: "c",
+ Args: OnlyValidArgs,
+ ValidArgs: []string{"one", "two"},
+ Run: emptyRun,
+ }
+
+ output, err := executeCommand(c, "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+}
+
+func TestOnlyValidArgsWithInvalidArgs(t *testing.T) {
+ c := &Command{
+ Use: "c",
+ Args: OnlyValidArgs,
+ ValidArgs: []string{"one", "two"},
+ Run: emptyRun,
+ }
+
+ _, err := executeCommand(c, "three")
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := `invalid argument "three" for "c"`
+ if got != expected {
+ t.Errorf("Expected: %q, got: %q", expected, got)
+ }
+}
+
+func TestArbitraryArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: ArbitraryArgs, Run: emptyRun}
+ output, err := executeCommand(c, "a", "b")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestMinimumNArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: MinimumNArgs(2), Run: emptyRun}
+ output, err := executeCommand(c, "a", "b", "c")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestMinimumNArgsWithLessArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: MinimumNArgs(2), Run: emptyRun}
+ _, err := executeCommand(c, "a")
+
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := "requires at least 2 arg(s), only received 1"
+ if got != expected {
+ t.Fatalf("Expected %q, got %q", expected, got)
+ }
+}
+
+func TestMaximumNArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: MaximumNArgs(3), Run: emptyRun}
+ output, err := executeCommand(c, "a", "b")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestMaximumNArgsWithMoreArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: MaximumNArgs(2), Run: emptyRun}
+ _, err := executeCommand(c, "a", "b", "c")
+
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := "accepts at most 2 arg(s), received 3"
+ if got != expected {
+ t.Fatalf("Expected %q, got %q", expected, got)
+ }
+}
+
+func TestExactArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: ExactArgs(3), Run: emptyRun}
+ output, err := executeCommand(c, "a", "b", "c")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestExactArgsWithInvalidCount(t *testing.T) {
+ c := &Command{Use: "c", Args: ExactArgs(2), Run: emptyRun}
+ _, err := executeCommand(c, "a", "b", "c")
+
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := "accepts 2 arg(s), received 3"
+ if got != expected {
+ t.Fatalf("Expected %q, got %q", expected, got)
+ }
+}
+
+func TestRangeArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun}
+ output, err := executeCommand(c, "a", "b", "c")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestRangeArgsWithInvalidCount(t *testing.T) {
+ c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun}
+ _, err := executeCommand(c, "a")
+
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := "accepts between 2 and 4 arg(s), received 1"
+ if got != expected {
+ t.Fatalf("Expected %q, got %q", expected, got)
+ }
+}
+
+func TestRootTakesNoArgs(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ _, err := executeCommand(rootCmd, "illegal", "args")
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := `unknown command "illegal" for "root"`
+ if !strings.Contains(got, expected) {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func TestRootTakesArgs(t *testing.T) {
+ rootCmd := &Command{Use: "root", Args: ArbitraryArgs, Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ _, err := executeCommand(rootCmd, "legal", "args")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestChildTakesNoArgs(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Args: NoArgs, Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ _, err := executeCommand(rootCmd, "child", "illegal", "args")
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := `unknown command "illegal" for "root child"`
+ if !strings.Contains(got, expected) {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func TestChildTakesArgs(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Args: ArbitraryArgs, Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ _, err := executeCommand(rootCmd, "child", "legal", "args")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go
new file mode 100644
index 0000000..02a4f15
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions_test.go
@@ -0,0 +1,217 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func checkOmit(t *testing.T, found, unexpected string) {
+ if strings.Contains(found, unexpected) {
+ t.Errorf("Got: %q\nBut should not have!\n", unexpected)
+ }
+}
+
+func check(t *testing.T, found, expected string) {
+ if !strings.Contains(found, expected) {
+ t.Errorf("Expecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+}
+
+func checkRegex(t *testing.T, found, pattern string) {
+ matched, err := regexp.MatchString(pattern, found)
+ if err != nil {
+ t.Errorf("Error thrown performing MatchString: \n %s\n", err)
+ }
+ if !matched {
+ t.Errorf("Expecting to match: \n %q\nGot:\n %q\n", pattern, found)
+ }
+}
+
+func runShellCheck(s string) error {
+ excluded := []string{
+ "SC2034", // PREFIX appears unused. Verify it or export it.
+ }
+ cmd := exec.Command("shellcheck", "-s", "bash", "-", "-e", strings.Join(excluded, ","))
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return err
+ }
+ go func() {
+ stdin.Write([]byte(s))
+ stdin.Close()
+ }()
+
+ return cmd.Run()
+}
+
+// World's worst custom function: it just keeps telling you to enter "hello"!
+const bashCompletionFunc = `__custom_func() {
+ COMPREPLY=( "hello" )
+}
+`
+
+func TestBashCompletions(t *testing.T) {
+ rootCmd := &Command{
+ Use: "root",
+ ArgAliases: []string{"pods", "nodes", "services", "replicationcontrollers", "po", "no", "svc", "rc"},
+ ValidArgs: []string{"pod", "node", "service", "replicationcontroller"},
+ BashCompletionFunction: bashCompletionFunc,
+ Run: emptyRun,
+ }
+ rootCmd.Flags().IntP("introot", "i", -1, "help message for flag introot")
+ rootCmd.MarkFlagRequired("introot")
+
+ // Filename.
+ rootCmd.Flags().String("filename", "", "Enter a filename")
+ rootCmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+
+ // Persistent filename.
+ rootCmd.PersistentFlags().String("persistent-filename", "", "Enter a filename")
+ rootCmd.MarkPersistentFlagFilename("persistent-filename")
+ rootCmd.MarkPersistentFlagRequired("persistent-filename")
+
+ // Filename extensions.
+ rootCmd.Flags().String("filename-ext", "", "Enter a filename (extension limited)")
+ rootCmd.MarkFlagFilename("filename-ext")
+ rootCmd.Flags().String("custom", "", "Enter a filename (extension limited)")
+ rootCmd.MarkFlagCustom("custom", "__complete_custom")
+
+ // Subdirectories in a given directory.
+ rootCmd.Flags().String("theme", "", "theme to use (located in /themes/THEMENAME/)")
+ rootCmd.Flags().SetAnnotation("theme", BashCompSubdirsInDir, []string{"themes"})
+
+ echoCmd := &Command{
+ Use: "echo [string to echo]",
+ Aliases: []string{"say"},
+ Short: "Echo anything to the screen",
+ Long: "an utterly useless command for testing.",
+ Example: "Just run cobra-test echo",
+ Run: emptyRun,
+ }
+
+ echoCmd.Flags().String("filename", "", "Enter a filename")
+ echoCmd.MarkFlagFilename("filename", "json", "yaml", "yml")
+ echoCmd.Flags().String("config", "", "config to use (located in /config/PROFILE/)")
+ echoCmd.Flags().SetAnnotation("config", BashCompSubdirsInDir, []string{"config"})
+
+ printCmd := &Command{
+ Use: "print [string to print]",
+ Args: MinimumNArgs(1),
+ Short: "Print anything to the screen",
+ Long: "an absolutely utterly useless command for testing.",
+ Run: emptyRun,
+ }
+
+ deprecatedCmd := &Command{
+ Use: "deprecated [can't do anything here]",
+ Args: NoArgs,
+ Short: "A command which is deprecated",
+ Long: "an absolutely utterly useless command for testing deprecation!.",
+ Deprecated: "Please use echo instead",
+ Run: emptyRun,
+ }
+
+ colonCmd := &Command{
+ Use: "cmd:colon",
+ Run: emptyRun,
+ }
+
+ timesCmd := &Command{
+ Use: "times [# times] [string to echo]",
+ SuggestFor: []string{"counts"},
+ Args: OnlyValidArgs,
+ ValidArgs: []string{"one", "two", "three", "four"},
+ Short: "Echo anything to the screen more times",
+ Long: "a slightly useless command for testing.",
+ Run: emptyRun,
+ }
+
+ echoCmd.AddCommand(timesCmd)
+ rootCmd.AddCommand(echoCmd, printCmd, deprecatedCmd, colonCmd)
+
+ buf := new(bytes.Buffer)
+ rootCmd.GenBashCompletion(buf)
+ output := buf.String()
+
+ check(t, output, "_root")
+ check(t, output, "_root_echo")
+ check(t, output, "_root_echo_times")
+ check(t, output, "_root_print")
+ check(t, output, "_root_cmd__colon")
+
+ // check for required flags
+ check(t, output, `must_have_one_flag+=("--introot=")`)
+ check(t, output, `must_have_one_flag+=("--persistent-filename=")`)
+ // check for custom completion function
+ check(t, output, `COMPREPLY=( "hello" )`)
+ // check for required nouns
+ check(t, output, `must_have_one_noun+=("pod")`)
+ // check for noun aliases
+ check(t, output, `noun_aliases+=("pods")`)
+ check(t, output, `noun_aliases+=("rc")`)
+ checkOmit(t, output, `must_have_one_noun+=("pods")`)
+ // check for filename extension flags
+ check(t, output, `flags_completion+=("_filedir")`)
+ // check for filename extension flags
+ check(t, output, `must_have_one_noun+=("three")`)
+ // check for filename extension flags
+ check(t, output, fmt.Sprintf(`flags_completion+=("__%s_handle_filename_extension_flag json|yaml|yml")`, rootCmd.Name()))
+ // check for filename extension flags in a subcommand
+ checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_filename_extension_flag json\|yaml\|yml"\)`, rootCmd.Name()))
+ // check for custom flags
+ check(t, output, `flags_completion+=("__complete_custom")`)
+ // check for subdirs_in_dir flags
+ check(t, output, fmt.Sprintf(`flags_completion+=("__%s_handle_subdirs_in_dir_flag themes")`, rootCmd.Name()))
+ // check for subdirs_in_dir flags in a subcommand
+ checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_subdirs_in_dir_flag config"\)`, rootCmd.Name()))
+
+ checkOmit(t, output, deprecatedCmd.Name())
+
+ // If available, run shellcheck against the script.
+ if err := exec.Command("which", "shellcheck").Run(); err != nil {
+ return
+ }
+ if err := runShellCheck(output); err != nil {
+ t.Fatalf("shellcheck failed: %v", err)
+ }
+}
+
+func TestBashCompletionHiddenFlag(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+
+ const flagName = "hiddenFlag"
+ c.Flags().Bool(flagName, false, "")
+ c.Flags().MarkHidden(flagName)
+
+ buf := new(bytes.Buffer)
+ c.GenBashCompletion(buf)
+ output := buf.String()
+
+ if strings.Contains(output, flagName) {
+ t.Errorf("Expected completion to not include %q flag: Got %v", flagName, output)
+ }
+}
+
+func TestBashCompletionDeprecatedFlag(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+
+ const flagName = "deprecated-flag"
+ c.Flags().Bool(flagName, false, "")
+ c.Flags().MarkDeprecated(flagName, "use --not-deprecated instead")
+
+ buf := new(bytes.Buffer)
+ c.GenBashCompletion(buf)
+ output := buf.String()
+
+ if strings.Contains(output, flagName) {
+ t.Errorf("expected completion to not include %q flag: Got %v", flagName, output)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
new file mode 100644
index 0000000..bc22e97
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
@@ -0,0 +1,683 @@
+package cmd
+
+func initAgpl() {
+ Licenses["agpl"] = License{
+ Name: "GNU Affero General Public License",
+ PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"},
+ Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .`,
+ Text: ` GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C)
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see .
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
new file mode 100644
index 0000000..38393d5
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
@@ -0,0 +1,238 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initApache2() {
+ Licenses["apache"] = License{
+ Name: "Apache 2.0",
+ PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"},
+ Header: `
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.`,
+ Text: `
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
new file mode 100644
index 0000000..4a847e0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
@@ -0,0 +1,71 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initBsdClause2() {
+ Licenses["freebsd"] = License{
+ Name: "Simplified BSD License",
+ PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd",
+ "2 clause bsd", "simplified bsd license"},
+ Header: `All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.`,
+ Text: `{{ .copyright }}
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go
new file mode 100644
index 0000000..c7476b3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go
@@ -0,0 +1,78 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initBsdClause3() {
+ Licenses["bsd"] = License{
+ Name: "NewBSD",
+ PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"},
+ Header: `All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.`,
+ Text: `{{ .copyright }}
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go
new file mode 100644
index 0000000..03e05b3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go
@@ -0,0 +1,376 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initGpl2() {
+ Licenses["gpl2"] = License{
+ Name: "GNU General Public License 2.0",
+ PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"},
+ Header: `
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+ Text: ` GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ 'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ , 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
new file mode 100644
index 0000000..ce07679
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
@@ -0,0 +1,711 @@
+// Copyright © 2015 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initGpl3() {
+ Licenses["gpl3"] = License{
+ Name: "GNU General Public License 3.0",
+ PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"},
+ Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .`,
+ Text: ` GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/licenses/why-not-lgpl.html>.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
new file mode 100644
index 0000000..0f8b96c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
@@ -0,0 +1,186 @@
+package cmd
+
+func initLgpl() {
+ Licenses["lgpl"] = License{
+ Name: "GNU Lesser General Public License",
+ PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"},
+ Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.`,
+ Text: ` GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+  Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
new file mode 100644
index 0000000..bd2d0c4
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
@@ -0,0 +1,63 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+func initMit() {
+ Licenses["mit"] = License{
+ Name: "MIT License",
+ PossibleMatches: []string{"mit"},
+ Header: `
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.`,
+ Text: `The MIT License (MIT)
+
+{{ .copyright }}
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+`,
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
new file mode 100644
index 0000000..a070134
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
@@ -0,0 +1,118 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+import (
+ "strings"
+ "time"
+
+ "github.com/spf13/viper"
+)
+
+// Licenses contains all possible licenses a user can choose from.
+var Licenses = make(map[string]License)
+
+// License represents a software license agreement, containing the Name of
+// the license, its possible matches (on the command line as given to cobra),
+// the header to be used with each file on the file's creating, and the text
+// of the license
+type License struct {
+ Name string // The type of license in use
+ PossibleMatches []string // Similar names to guess
+ Text string // License text data
+ Header string // License header for source files
+}
+
+func init() {
+ // Allows a user to not use a license.
+ Licenses["none"] = License{"None", []string{"none", "false"}, "", ""}
+
+ initApache2()
+ initMit()
+ initBsdClause3()
+ initBsdClause2()
+ initGpl2()
+ initGpl3()
+ initLgpl()
+ initAgpl()
+}
+
+// getLicense returns license specified by user in flag or in config.
+// If user didn't specify the license, it returns Apache License 2.0.
+//
+// TODO: Inspect project for existing license
+func getLicense() License {
+ // If explicitly flagged, use that.
+ if userLicense != "" {
+ return findLicense(userLicense)
+ }
+
+ // If user wants to have custom license, use that.
+ if viper.IsSet("license.header") || viper.IsSet("license.text") {
+ return License{Header: viper.GetString("license.header"),
+ Text: viper.GetString("license.text")}
+ }
+
+ // If user wants to have built-in license, use that.
+ if viper.IsSet("license") {
+ return findLicense(viper.GetString("license"))
+ }
+
+ // If user didn't set any license, use Apache 2.0 by default.
+ return Licenses["apache"]
+}
+
+func copyrightLine() string {
+ author := viper.GetString("author")
+
+ year := viper.GetString("year") // For tests.
+ if year == "" {
+ year = time.Now().Format("2006")
+ }
+
+ return "Copyright © " + year + " " + author
+}
+
+// findLicense looks for License object of built-in licenses.
+// If it didn't find license, then the app will be terminated and
+// error will be printed.
+func findLicense(name string) License {
+ found := matchLicense(name)
+ if found == "" {
+ er("unknown license: " + name)
+ }
+ return Licenses[found]
+}
+
+// matchLicense compares the given a license name
+// to PossibleMatches of all built-in licenses.
+// It returns blank string, if name is blank string or it didn't find
+// then appropriate match to name.
+func matchLicense(name string) string {
+ if name == "" {
+ return ""
+ }
+
+ for key, lic := range Licenses {
+ for _, match := range lic.PossibleMatches {
+ if strings.EqualFold(name, match) {
+ return key
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/spf13/cobra/cobra_test.go b/vendor/github.com/spf13/cobra/cobra_test.go
new file mode 100644
index 0000000..0d1755b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra_test.go
@@ -0,0 +1,22 @@
+package cobra
+
+import (
+ "testing"
+ "text/template"
+)
+
+func TestAddTemplateFunctions(t *testing.T) {
+ AddTemplateFunc("t", func() bool { return true })
+ AddTemplateFuncs(template.FuncMap{
+ "f": func() bool { return false },
+ "h": func() string { return "Hello," },
+ "w": func() string { return "world." }})
+
+ c := &Command{}
+ c.SetUsageTemplate(`{{if t}}{{h}}{{end}}{{if f}}{{h}}{{end}} {{w}}`)
+
+ const expected = "Hello, world."
+ if got := c.UsageString(); got != expected {
+ t.Errorf("Expected UsageString: %v\nGot: %v", expected, got)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/command_test.go b/vendor/github.com/spf13/cobra/command_test.go
new file mode 100644
index 0000000..ccee031
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_test.go
@@ -0,0 +1,1733 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/spf13/pflag"
+)
+
+func emptyRun(*Command, []string) {}
+
+func executeCommand(root *Command, args ...string) (output string, err error) {
+ _, output, err = executeCommandC(root, args...)
+ return output, err
+}
+
+func executeCommandC(root *Command, args ...string) (c *Command, output string, err error) {
+ buf := new(bytes.Buffer)
+ root.SetOutput(buf)
+ root.SetArgs(args)
+
+ c, err = root.ExecuteC()
+
+ return c, buf.String(), err
+}
+
+func resetCommandLineFlagSet() {
+ pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
+}
+
+func checkStringContains(t *testing.T, got, expected string) {
+ if !strings.Contains(got, expected) {
+ t.Errorf("Expected to contain: \n %v\nGot:\n %v\n", expected, got)
+ }
+}
+
+func checkStringOmits(t *testing.T, got, expected string) {
+ if strings.Contains(got, expected) {
+ t.Errorf("Expected to not contain: \n %v\nGot: %v", expected, got)
+ }
+}
+
+func TestSingleCommand(t *testing.T) {
+ var rootCmdArgs []string
+ rootCmd := &Command{
+ Use: "root",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { rootCmdArgs = args },
+ }
+ aCmd := &Command{Use: "a", Args: NoArgs, Run: emptyRun}
+ bCmd := &Command{Use: "b", Args: NoArgs, Run: emptyRun}
+ rootCmd.AddCommand(aCmd, bCmd)
+
+ output, err := executeCommand(rootCmd, "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(rootCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("rootCmdArgs expected: %q, got: %q", expected, got)
+ }
+}
+
+func TestChildCommand(t *testing.T) {
+ var child1CmdArgs []string
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun}
+ child1Cmd := &Command{
+ Use: "child1",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { child1CmdArgs = args },
+ }
+ child2Cmd := &Command{Use: "child2", Args: NoArgs, Run: emptyRun}
+ rootCmd.AddCommand(child1Cmd, child2Cmd)
+
+ output, err := executeCommand(rootCmd, "child1", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(child1CmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("child1CmdArgs expected: %q, got: %q", expected, got)
+ }
+}
+
+func TestCallCommandWithoutSubcommands(t *testing.T) {
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun}
+ _, err := executeCommand(rootCmd)
+ if err != nil {
+ t.Errorf("Calling command without subcommands should not have error: %v", err)
+ }
+}
+
+func TestRootExecuteUnknownCommand(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun})
+
+ output, _ := executeCommand(rootCmd, "unknown")
+
+ expected := "Error: unknown command \"unknown\" for \"root\"\nRun 'root --help' for usage.\n"
+
+ if output != expected {
+ t.Errorf("Expected:\n %q\nGot:\n %q\n", expected, output)
+ }
+}
+
+func TestSubcommandExecuteC(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ c, output, err := executeCommandC(rootCmd, "child")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if c.Name() != "child" {
+ t.Errorf(`invalid command returned from ExecuteC: expected "child"', got %q`, c.Name())
+ }
+}
+
+func TestRootUnknownCommandSilenced(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ rootCmd.SilenceErrors = true
+ rootCmd.SilenceUsage = true
+ rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun})
+
+ output, _ := executeCommand(rootCmd, "unknown")
+ if output != "" {
+ t.Errorf("Expected blank output, because of silenced usage.\nGot:\n %q\n", output)
+ }
+}
+
+func TestCommandAlias(t *testing.T) {
+ var timesCmdArgs []string
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun}
+ echoCmd := &Command{
+ Use: "echo",
+ Aliases: []string{"say", "tell"},
+ Args: NoArgs,
+ Run: emptyRun,
+ }
+ timesCmd := &Command{
+ Use: "times",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { timesCmdArgs = args },
+ }
+ echoCmd.AddCommand(timesCmd)
+ rootCmd.AddCommand(echoCmd)
+
+ output, err := executeCommand(rootCmd, "tell", "times", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(timesCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("timesCmdArgs expected: %v, got: %v", expected, got)
+ }
+}
+
+func TestEnablePrefixMatching(t *testing.T) {
+ EnablePrefixMatching = true
+
+ var aCmdArgs []string
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun}
+ aCmd := &Command{
+ Use: "aCmd",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { aCmdArgs = args },
+ }
+ bCmd := &Command{Use: "bCmd", Args: NoArgs, Run: emptyRun}
+ rootCmd.AddCommand(aCmd, bCmd)
+
+ output, err := executeCommand(rootCmd, "a", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(aCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("aCmdArgs expected: %q, got: %q", expected, got)
+ }
+
+ EnablePrefixMatching = false
+}
+
+func TestAliasPrefixMatching(t *testing.T) {
+ EnablePrefixMatching = true
+
+ var timesCmdArgs []string
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun}
+ echoCmd := &Command{
+ Use: "echo",
+ Aliases: []string{"say", "tell"},
+ Args: NoArgs,
+ Run: emptyRun,
+ }
+ timesCmd := &Command{
+ Use: "times",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { timesCmdArgs = args },
+ }
+ echoCmd.AddCommand(timesCmd)
+ rootCmd.AddCommand(echoCmd)
+
+ output, err := executeCommand(rootCmd, "sa", "times", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(timesCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("timesCmdArgs expected: %v, got: %v", expected, got)
+ }
+
+ EnablePrefixMatching = false
+}
+
+// TestChildSameName checks the correct behaviour of cobra in cases,
+// when an application with name "foo" and with subcommand "foo"
+// is executed with args "foo foo".
+func TestChildSameName(t *testing.T) {
+ var fooCmdArgs []string
+ rootCmd := &Command{Use: "foo", Args: NoArgs, Run: emptyRun}
+ fooCmd := &Command{
+ Use: "foo",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { fooCmdArgs = args },
+ }
+ barCmd := &Command{Use: "bar", Args: NoArgs, Run: emptyRun}
+ rootCmd.AddCommand(fooCmd, barCmd)
+
+ output, err := executeCommand(rootCmd, "foo", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(fooCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("fooCmdArgs expected: %v, got: %v", expected, got)
+ }
+}
+
+// TestGrandChildSameName checks the correct behaviour of cobra
+// when the user has a root command and a grandchild
+// with the same name.
+func TestGrandChildSameName(t *testing.T) {
+ var fooCmdArgs []string
+ rootCmd := &Command{Use: "foo", Args: NoArgs, Run: emptyRun}
+ barCmd := &Command{Use: "bar", Args: NoArgs, Run: emptyRun}
+ fooCmd := &Command{
+ Use: "foo",
+ Args: ExactArgs(2),
+ Run: func(_ *Command, args []string) { fooCmdArgs = args },
+ }
+ barCmd.AddCommand(fooCmd)
+ rootCmd.AddCommand(barCmd)
+
+ output, err := executeCommand(rootCmd, "bar", "foo", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(fooCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("fooCmdArgs expected: %v, got: %v", expected, got)
+ }
+}
+
+func TestFlagLong(t *testing.T) {
+ var cArgs []string
+ c := &Command{
+ Use: "c",
+ Args: ArbitraryArgs,
+ Run: func(_ *Command, args []string) { cArgs = args },
+ }
+
+ var intFlagValue int
+ var stringFlagValue string
+ c.Flags().IntVar(&intFlagValue, "intf", -1, "")
+ c.Flags().StringVar(&stringFlagValue, "sf", "", "")
+
+ output, err := executeCommand(c, "--intf=7", "--sf=abc", "one", "--", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", err)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if c.ArgsLenAtDash() != 1 {
+ t.Errorf("Expected ArgsLenAtDash: %v but got %v", 1, c.ArgsLenAtDash())
+ }
+ if intFlagValue != 7 {
+ t.Errorf("Expected intFlagValue: %v, got %v", 7, intFlagValue)
+ }
+ if stringFlagValue != "abc" {
+ t.Errorf("Expected stringFlagValue: %q, got %q", "abc", stringFlagValue)
+ }
+
+ got := strings.Join(cArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("Expected arguments: %q, got %q", expected, got)
+ }
+}
+
+func TestFlagShort(t *testing.T) {
+ var cArgs []string
+ c := &Command{
+ Use: "c",
+ Args: ArbitraryArgs,
+ Run: func(_ *Command, args []string) { cArgs = args },
+ }
+
+ var intFlagValue int
+ var stringFlagValue string
+ c.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "")
+ c.Flags().StringVarP(&stringFlagValue, "sf", "s", "", "")
+
+ output, err := executeCommand(c, "-i", "7", "-sabc", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", err)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if intFlagValue != 7 {
+ t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue)
+ }
+ if stringFlagValue != "abc" {
+ t.Errorf("Expected stringFlagValue: %q, got %q", "abc", stringFlagValue)
+ }
+
+ got := strings.Join(cArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("Expected arguments: %q, got %q", expected, got)
+ }
+}
+
+func TestChildFlag(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ var intFlagValue int
+ childCmd.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "")
+
+ output, err := executeCommand(rootCmd, "child", "-i7")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", err)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if intFlagValue != 7 {
+ t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue)
+ }
+}
+
+func TestChildFlagWithParentLocalFlag(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ var intFlagValue int
+ rootCmd.Flags().StringP("sf", "s", "", "")
+ childCmd.Flags().IntVarP(&intFlagValue, "intf", "i", -1, "")
+
+ _, err := executeCommand(rootCmd, "child", "-i7", "-sabc")
+ if err == nil {
+ t.Errorf("Invalid flag should generate error")
+ }
+
+ checkStringContains(t, err.Error(), "unknown shorthand")
+
+ if intFlagValue != 7 {
+ t.Errorf("Expected flag value: %v, got %v", 7, intFlagValue)
+ }
+}
+
+func TestFlagInvalidInput(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ rootCmd.Flags().IntP("intf", "i", -1, "")
+
+ _, err := executeCommand(rootCmd, "-iabc")
+ if err == nil {
+ t.Errorf("Invalid flag value should generate error")
+ }
+
+ checkStringContains(t, err.Error(), "invalid syntax")
+}
+
+func TestFlagBeforeCommand(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ var flagValue int
+ childCmd.Flags().IntVarP(&flagValue, "intf", "i", -1, "")
+
+ // With short flag.
+ _, err := executeCommand(rootCmd, "-i7", "child")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if flagValue != 7 {
+ t.Errorf("Expected flag value: %v, got %v", 7, flagValue)
+ }
+
+ // With long flag.
+ _, err = executeCommand(rootCmd, "--intf=8", "child")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if flagValue != 8 {
+		t.Errorf("Expected flag value: %v, got %v", 8, flagValue)
+ }
+}
+
+func TestStripFlags(t *testing.T) {
+ tests := []struct {
+ input []string
+ output []string
+ }{
+ {
+ []string{"foo", "bar"},
+ []string{"foo", "bar"},
+ },
+ {
+ []string{"foo", "--str", "-s"},
+ []string{"foo"},
+ },
+ {
+ []string{"-s", "foo", "--str", "bar"},
+ []string{},
+ },
+ {
+ []string{"-i10", "echo"},
+ []string{"echo"},
+ },
+ {
+ []string{"-i=10", "echo"},
+ []string{"echo"},
+ },
+ {
+ []string{"--int=100", "echo"},
+ []string{"echo"},
+ },
+ {
+ []string{"-ib", "echo", "-sfoo", "baz"},
+ []string{"echo", "baz"},
+ },
+ {
+ []string{"-i=baz", "bar", "-i", "foo", "blah"},
+ []string{"bar", "blah"},
+ },
+ {
+ []string{"--int=baz", "-sbar", "-i", "foo", "blah"},
+ []string{"blah"},
+ },
+ {
+ []string{"--bool", "bar", "-i", "foo", "blah"},
+ []string{"bar", "blah"},
+ },
+ {
+ []string{"-b", "bar", "-i", "foo", "blah"},
+ []string{"bar", "blah"},
+ },
+ {
+ []string{"--persist", "bar"},
+ []string{"bar"},
+ },
+ {
+ []string{"-p", "bar"},
+ []string{"bar"},
+ },
+ }
+
+ c := &Command{Use: "c", Run: emptyRun}
+ c.PersistentFlags().BoolP("persist", "p", false, "")
+ c.Flags().IntP("int", "i", -1, "")
+ c.Flags().StringP("str", "s", "", "")
+ c.Flags().BoolP("bool", "b", false, "")
+
+ for i, test := range tests {
+ got := stripFlags(test.input, c)
+ if !reflect.DeepEqual(test.output, got) {
+ t.Errorf("(%v) Expected: %v, got: %v", i, test.output, got)
+ }
+ }
+}
+
+func TestDisableFlagParsing(t *testing.T) {
+ var cArgs []string
+ c := &Command{
+ Use: "c",
+ DisableFlagParsing: true,
+ Run: func(_ *Command, args []string) {
+ cArgs = args
+ },
+ }
+
+ args := []string{"cmd", "-v", "-race", "-file", "foo.go"}
+ output, err := executeCommand(c, args...)
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if !reflect.DeepEqual(args, cArgs) {
+ t.Errorf("Expected: %v, got: %v", args, cArgs)
+ }
+}
+
+func TestPersistentFlagsOnSameCommand(t *testing.T) {
+ var rootCmdArgs []string
+ rootCmd := &Command{
+ Use: "root",
+ Args: ArbitraryArgs,
+ Run: func(_ *Command, args []string) { rootCmdArgs = args },
+ }
+
+ var flagValue int
+ rootCmd.PersistentFlags().IntVarP(&flagValue, "intf", "i", -1, "")
+
+ output, err := executeCommand(rootCmd, "-i7", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(rootCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("rootCmdArgs expected: %q, got %q", expected, got)
+ }
+ if flagValue != 7 {
+ t.Errorf("flagValue expected: %v, got %v", 7, flagValue)
+ }
+}
+
+// TestEmptyInputs checks
+// that flags are parsed correctly when args contain blank strings.
+func TestEmptyInputs(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+
+ var flagValue int
+ c.Flags().IntVarP(&flagValue, "intf", "i", -1, "")
+
+ output, err := executeCommand(c, "", "-i7", "")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if flagValue != 7 {
+ t.Errorf("flagValue expected: %v, got %v", 7, flagValue)
+ }
+}
+
+func TestOverwrittenFlag(t *testing.T) {
+ // TODO: This test fails, but should work.
+ t.Skip()
+
+ parent := &Command{Use: "parent", Run: emptyRun}
+ child := &Command{Use: "child", Run: emptyRun}
+
+ parent.PersistentFlags().Bool("boolf", false, "")
+ parent.PersistentFlags().Int("intf", -1, "")
+ child.Flags().String("strf", "", "")
+ child.Flags().Int("intf", -1, "")
+
+ parent.AddCommand(child)
+
+ childInherited := child.InheritedFlags()
+ childLocal := child.LocalFlags()
+
+ if childLocal.Lookup("strf") == nil {
+ t.Error(`LocalFlags expected to contain "strf", got "nil"`)
+ }
+ if childInherited.Lookup("boolf") == nil {
+ t.Error(`InheritedFlags expected to contain "boolf", got "nil"`)
+ }
+
+ if childInherited.Lookup("intf") != nil {
+ t.Errorf(`InheritedFlags should not contain overwritten flag "intf"`)
+ }
+ if childLocal.Lookup("intf") == nil {
+ t.Error(`LocalFlags expected to contain "intf", got "nil"`)
+ }
+}
+
+func TestPersistentFlagsOnChild(t *testing.T) {
+ var childCmdArgs []string
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{
+ Use: "child",
+ Args: ArbitraryArgs,
+ Run: func(_ *Command, args []string) { childCmdArgs = args },
+ }
+ rootCmd.AddCommand(childCmd)
+
+ var parentFlagValue int
+ var childFlagValue int
+ rootCmd.PersistentFlags().IntVarP(&parentFlagValue, "parentf", "p", -1, "")
+ childCmd.Flags().IntVarP(&childFlagValue, "childf", "c", -1, "")
+
+ output, err := executeCommand(rootCmd, "child", "-c7", "-p8", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ got := strings.Join(childCmdArgs, " ")
+ expected := "one two"
+ if got != expected {
+ t.Errorf("childCmdArgs expected: %q, got %q", expected, got)
+ }
+ if parentFlagValue != 8 {
+ t.Errorf("parentFlagValue expected: %v, got %v", 8, parentFlagValue)
+ }
+ if childFlagValue != 7 {
+ t.Errorf("childFlagValue expected: %v, got %v", 7, childFlagValue)
+ }
+}
+
+func TestRequiredFlags(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+ c.Flags().String("foo1", "", "")
+ c.MarkFlagRequired("foo1")
+ c.Flags().String("foo2", "", "")
+ c.MarkFlagRequired("foo2")
+ c.Flags().String("bar", "", "")
+
+ expected := fmt.Sprintf("required flag(s) %q, %q not set", "foo1", "foo2")
+
+ _, err := executeCommand(c)
+ got := err.Error()
+
+ if got != expected {
+ t.Errorf("Expected error: %q, got: %q", expected, got)
+ }
+}
+
+func TestPersistentRequiredFlags(t *testing.T) {
+ parent := &Command{Use: "parent", Run: emptyRun}
+ parent.PersistentFlags().String("foo1", "", "")
+ parent.MarkPersistentFlagRequired("foo1")
+ parent.PersistentFlags().String("foo2", "", "")
+ parent.MarkPersistentFlagRequired("foo2")
+ parent.Flags().String("foo3", "", "")
+
+ child := &Command{Use: "child", Run: emptyRun}
+ child.Flags().String("bar1", "", "")
+ child.MarkFlagRequired("bar1")
+ child.Flags().String("bar2", "", "")
+ child.MarkFlagRequired("bar2")
+ child.Flags().String("bar3", "", "")
+
+ parent.AddCommand(child)
+
+ expected := fmt.Sprintf("required flag(s) %q, %q, %q, %q not set", "bar1", "bar2", "foo1", "foo2")
+
+ _, err := executeCommand(parent, "child")
+ if err.Error() != expected {
+ t.Errorf("Expected %q, got %q", expected, err.Error())
+ }
+}
+
+func TestInitHelpFlagMergesFlags(t *testing.T) {
+ usage := "custom flag"
+ rootCmd := &Command{Use: "root"}
+ rootCmd.PersistentFlags().Bool("help", false, "custom flag")
+ childCmd := &Command{Use: "child"}
+ rootCmd.AddCommand(childCmd)
+
+ childCmd.InitDefaultHelpFlag()
+ got := childCmd.Flags().Lookup("help").Usage
+ if got != usage {
+ t.Errorf("Expected the help flag from the root command with usage: %v\nGot the default with usage: %v", usage, got)
+ }
+}
+
+func TestHelpCommandExecuted(t *testing.T) {
+ rootCmd := &Command{Use: "root", Long: "Long description", Run: emptyRun}
+ rootCmd.AddCommand(&Command{Use: "child", Run: emptyRun})
+
+ output, err := executeCommand(rootCmd, "help")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, rootCmd.Long)
+}
+
+func TestHelpCommandExecutedOnChild(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Long: "Long description", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ output, err := executeCommand(rootCmd, "help", "child")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, childCmd.Long)
+}
+
+func TestSetHelpCommand(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+ c.AddCommand(&Command{Use: "empty", Run: emptyRun})
+
+ expected := "WORKS"
+ c.SetHelpCommand(&Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+ Simply type ` + c.Name() + ` help [path to command] for full details.`,
+ Run: func(c *Command, _ []string) { c.Print(expected) },
+ })
+
+ got, err := executeCommand(c, "help")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if got != expected {
+ t.Errorf("Expected to contain %q, got %q", expected, got)
+ }
+}
+
+func TestHelpFlagExecuted(t *testing.T) {
+ rootCmd := &Command{Use: "root", Long: "Long description", Run: emptyRun}
+
+ output, err := executeCommand(rootCmd, "--help")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, rootCmd.Long)
+}
+
+func TestHelpFlagExecutedOnChild(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Long: "Long description", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ output, err := executeCommand(rootCmd, "child", "--help")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, childCmd.Long)
+}
+
+// TestHelpFlagInHelp checks,
+// if '--help' flag is shown in help for child (executing `parent help child`),
+// that has no other flags.
+// Related to https://github.com/spf13/cobra/issues/302.
+func TestHelpFlagInHelp(t *testing.T) {
+ parentCmd := &Command{Use: "parent", Run: func(*Command, []string) {}}
+
+ childCmd := &Command{Use: "child", Run: func(*Command, []string) {}}
+ parentCmd.AddCommand(childCmd)
+
+ output, err := executeCommand(parentCmd, "help", "child")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, "[flags]")
+}
+
+func TestFlagsInUsage(t *testing.T) {
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: func(*Command, []string) {}}
+ output, err := executeCommand(rootCmd, "--help")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, "[flags]")
+}
+
+func TestHelpExecutedOnNonRunnableChild(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Long: "Long description"}
+ rootCmd.AddCommand(childCmd)
+
+ output, err := executeCommand(rootCmd, "child")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, childCmd.Long)
+}
+
+func TestVersionFlagExecuted(t *testing.T) {
+ rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun}
+
+ output, err := executeCommand(rootCmd, "--version", "arg1")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, "root version 1.0.0")
+}
+
+func TestVersionTemplate(t *testing.T) {
+ rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun}
+ rootCmd.SetVersionTemplate(`customized version: {{.Version}}`)
+
+ output, err := executeCommand(rootCmd, "--version", "arg1")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, "customized version: 1.0.0")
+}
+
+func TestVersionFlagExecutedOnSubcommand(t *testing.T) {
+ rootCmd := &Command{Use: "root", Version: "1.0.0"}
+ rootCmd.AddCommand(&Command{Use: "sub", Run: emptyRun})
+
+ output, err := executeCommand(rootCmd, "--version", "sub")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, "root version 1.0.0")
+}
+
+func TestVersionFlagOnlyAddedToRoot(t *testing.T) {
+ rootCmd := &Command{Use: "root", Version: "1.0.0", Run: emptyRun}
+ rootCmd.AddCommand(&Command{Use: "sub", Run: emptyRun})
+
+ _, err := executeCommand(rootCmd, "sub", "--version")
+ if err == nil {
+ t.Errorf("Expected error")
+ }
+
+ checkStringContains(t, err.Error(), "unknown flag: --version")
+}
+
+func TestVersionFlagOnlyExistsIfVersionNonEmpty(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+
+ _, err := executeCommand(rootCmd, "--version")
+ if err == nil {
+ t.Errorf("Expected error")
+ }
+ checkStringContains(t, err.Error(), "unknown flag: --version")
+}
+
+func TestUsageIsNotPrintedTwice(t *testing.T) {
+ var cmd = &Command{Use: "root"}
+ var sub = &Command{Use: "sub"}
+ cmd.AddCommand(sub)
+
+ output, _ := executeCommand(cmd, "")
+ if strings.Count(output, "Usage:") != 1 {
+ t.Error("Usage output is not printed exactly once")
+ }
+}
+
+func TestVisitParents(t *testing.T) {
+ c := &Command{Use: "app"}
+ sub := &Command{Use: "sub"}
+ dsub := &Command{Use: "dsub"}
+ sub.AddCommand(dsub)
+ c.AddCommand(sub)
+
+ total := 0
+ add := func(x *Command) {
+ total++
+ }
+ sub.VisitParents(add)
+ if total != 1 {
+ t.Errorf("Should have visited 1 parent but visited %d", total)
+ }
+
+ total = 0
+ dsub.VisitParents(add)
+ if total != 2 {
+ t.Errorf("Should have visited 2 parents but visited %d", total)
+ }
+
+ total = 0
+ c.VisitParents(add)
+ if total != 0 {
+ t.Errorf("Should have visited no parents but visited %d", total)
+ }
+}
+
+func TestSuggestions(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ timesCmd := &Command{
+ Use: "times",
+ SuggestFor: []string{"counts"},
+ Run: emptyRun,
+ }
+ rootCmd.AddCommand(timesCmd)
+
+ templateWithSuggestions := "Error: unknown command \"%s\" for \"root\"\n\nDid you mean this?\n\t%s\n\nRun 'root --help' for usage.\n"
+ templateWithoutSuggestions := "Error: unknown command \"%s\" for \"root\"\nRun 'root --help' for usage.\n"
+
+ tests := map[string]string{
+ "time": "times",
+ "tiems": "times",
+ "tims": "times",
+ "timeS": "times",
+ "rimes": "times",
+ "ti": "times",
+ "t": "times",
+ "timely": "times",
+ "ri": "",
+ "timezone": "",
+ "foo": "",
+ "counts": "times",
+ }
+
+ for typo, suggestion := range tests {
+ for _, suggestionsDisabled := range []bool{true, false} {
+ rootCmd.DisableSuggestions = suggestionsDisabled
+
+ var expected string
+ output, _ := executeCommand(rootCmd, typo)
+
+ if suggestion == "" || suggestionsDisabled {
+ expected = fmt.Sprintf(templateWithoutSuggestions, typo)
+ } else {
+ expected = fmt.Sprintf(templateWithSuggestions, typo, suggestion)
+ }
+
+ if output != expected {
+ t.Errorf("Unexpected response.\nExpected:\n %q\nGot:\n %q\n", expected, output)
+ }
+ }
+ }
+}
+
+func TestRemoveCommand(t *testing.T) {
+ rootCmd := &Command{Use: "root", Args: NoArgs, Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+ rootCmd.RemoveCommand(childCmd)
+
+ _, err := executeCommand(rootCmd, "child")
+ if err == nil {
+ t.Error("Expected error on calling removed command. Got nil.")
+ }
+}
+
+func TestReplaceCommandWithRemove(t *testing.T) {
+ childUsed := 0
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ child1Cmd := &Command{
+ Use: "child",
+ Run: func(*Command, []string) { childUsed = 1 },
+ }
+ child2Cmd := &Command{
+ Use: "child",
+ Run: func(*Command, []string) { childUsed = 2 },
+ }
+ rootCmd.AddCommand(child1Cmd)
+ rootCmd.RemoveCommand(child1Cmd)
+ rootCmd.AddCommand(child2Cmd)
+
+ output, err := executeCommand(rootCmd, "child")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if childUsed == 1 {
+ t.Error("Removed command shouldn't be called")
+ }
+ if childUsed != 2 {
+ t.Error("Replacing command should have been called but didn't")
+ }
+}
+
+func TestDeprecatedCommand(t *testing.T) {
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ deprecatedCmd := &Command{
+ Use: "deprecated",
+ Deprecated: "This command is deprecated",
+ Run: emptyRun,
+ }
+ rootCmd.AddCommand(deprecatedCmd)
+
+ output, err := executeCommand(rootCmd, "deprecated")
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ checkStringContains(t, output, deprecatedCmd.Deprecated)
+}
+
+func TestHooks(t *testing.T) {
+ var (
+ persPreArgs string
+ preArgs string
+ runArgs string
+ postArgs string
+ persPostArgs string
+ )
+
+ c := &Command{
+ Use: "c",
+ PersistentPreRun: func(_ *Command, args []string) {
+ persPreArgs = strings.Join(args, " ")
+ },
+ PreRun: func(_ *Command, args []string) {
+ preArgs = strings.Join(args, " ")
+ },
+ Run: func(_ *Command, args []string) {
+ runArgs = strings.Join(args, " ")
+ },
+ PostRun: func(_ *Command, args []string) {
+ postArgs = strings.Join(args, " ")
+ },
+ PersistentPostRun: func(_ *Command, args []string) {
+ persPostArgs = strings.Join(args, " ")
+ },
+ }
+
+ output, err := executeCommand(c, "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if persPreArgs != "one two" {
+ t.Errorf("Expected persPreArgs %q, got %q", "one two", persPreArgs)
+ }
+ if preArgs != "one two" {
+ t.Errorf("Expected preArgs %q, got %q", "one two", preArgs)
+ }
+ if runArgs != "one two" {
+ t.Errorf("Expected runArgs %q, got %q", "one two", runArgs)
+ }
+ if postArgs != "one two" {
+ t.Errorf("Expected postArgs %q, got %q", "one two", postArgs)
+ }
+ if persPostArgs != "one two" {
+ t.Errorf("Expected persPostArgs %q, got %q", "one two", persPostArgs)
+ }
+}
+
+func TestPersistentHooks(t *testing.T) {
+ var (
+ parentPersPreArgs string
+ parentPreArgs string
+ parentRunArgs string
+ parentPostArgs string
+ parentPersPostArgs string
+ )
+
+ var (
+ childPersPreArgs string
+ childPreArgs string
+ childRunArgs string
+ childPostArgs string
+ childPersPostArgs string
+ )
+
+ parentCmd := &Command{
+ Use: "parent",
+ PersistentPreRun: func(_ *Command, args []string) {
+ parentPersPreArgs = strings.Join(args, " ")
+ },
+ PreRun: func(_ *Command, args []string) {
+ parentPreArgs = strings.Join(args, " ")
+ },
+ Run: func(_ *Command, args []string) {
+ parentRunArgs = strings.Join(args, " ")
+ },
+ PostRun: func(_ *Command, args []string) {
+ parentPostArgs = strings.Join(args, " ")
+ },
+ PersistentPostRun: func(_ *Command, args []string) {
+ parentPersPostArgs = strings.Join(args, " ")
+ },
+ }
+
+ childCmd := &Command{
+ Use: "child",
+ PersistentPreRun: func(_ *Command, args []string) {
+ childPersPreArgs = strings.Join(args, " ")
+ },
+ PreRun: func(_ *Command, args []string) {
+ childPreArgs = strings.Join(args, " ")
+ },
+ Run: func(_ *Command, args []string) {
+ childRunArgs = strings.Join(args, " ")
+ },
+ PostRun: func(_ *Command, args []string) {
+ childPostArgs = strings.Join(args, " ")
+ },
+ PersistentPostRun: func(_ *Command, args []string) {
+ childPersPostArgs = strings.Join(args, " ")
+ },
+ }
+ parentCmd.AddCommand(childCmd)
+
+ output, err := executeCommand(parentCmd, "child", "one", "two")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ // TODO: This test fails, but should not.
+ // Related to https://github.com/spf13/cobra/issues/252.
+ //
+ // if parentPersPreArgs != "one two" {
+ // t.Errorf("Expected parentPersPreArgs %q, got %q", "one two", parentPersPreArgs)
+ // }
+ if parentPreArgs != "" {
+ t.Errorf("Expected blank parentPreArgs, got %q", parentPreArgs)
+ }
+ if parentRunArgs != "" {
+ t.Errorf("Expected blank parentRunArgs, got %q", parentRunArgs)
+ }
+ if parentPostArgs != "" {
+ t.Errorf("Expected blank parentPostArgs, got %q", parentPostArgs)
+ }
+ // TODO: This test fails, but should not.
+ // Related to https://github.com/spf13/cobra/issues/252.
+ //
+ // if parentPersPostArgs != "one two" {
+ // t.Errorf("Expected parentPersPostArgs %q, got %q", "one two", parentPersPostArgs)
+ // }
+
+ if childPersPreArgs != "one two" {
+ t.Errorf("Expected childPersPreArgs %q, got %q", "one two", childPersPreArgs)
+ }
+ if childPreArgs != "one two" {
+ t.Errorf("Expected childPreArgs %q, got %q", "one two", childPreArgs)
+ }
+ if childRunArgs != "one two" {
+ t.Errorf("Expected childRunArgs %q, got %q", "one two", childRunArgs)
+ }
+ if childPostArgs != "one two" {
+ t.Errorf("Expected childPostArgs %q, got %q", "one two", childPostArgs)
+ }
+ if childPersPostArgs != "one two" {
+ t.Errorf("Expected childPersPostArgs %q, got %q", "one two", childPersPostArgs)
+ }
+}
+
+// Related to https://github.com/spf13/cobra/issues/521.
+func TestGlobalNormFuncPropagation(t *testing.T) {
+ normFunc := func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ return pflag.NormalizedName(name)
+ }
+
+ rootCmd := &Command{Use: "root", Run: emptyRun}
+ childCmd := &Command{Use: "child", Run: emptyRun}
+ rootCmd.AddCommand(childCmd)
+
+ rootCmd.SetGlobalNormalizationFunc(normFunc)
+ if reflect.ValueOf(normFunc).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() {
+ t.Error("rootCmd seems to have a wrong normalization function")
+ }
+
+ if reflect.ValueOf(normFunc).Pointer() != reflect.ValueOf(childCmd.GlobalNormalizationFunc()).Pointer() {
+ t.Error("childCmd should have had the normalization function of rootCmd")
+ }
+}
+
+// Related to https://github.com/spf13/cobra/issues/521.
+func TestNormPassedOnLocal(t *testing.T) {
+ toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ return pflag.NormalizedName(strings.ToUpper(name))
+ }
+
+ c := &Command{}
+ c.Flags().Bool("flagname", true, "this is a dummy flag")
+ c.SetGlobalNormalizationFunc(toUpper)
+ if c.LocalFlags().Lookup("flagname") != c.LocalFlags().Lookup("FLAGNAME") {
+ t.Error("Normalization function should be passed on to Local flag set")
+ }
+}
+
+// Related to https://github.com/spf13/cobra/issues/521.
+func TestNormPassedOnInherited(t *testing.T) {
+ toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ return pflag.NormalizedName(strings.ToUpper(name))
+ }
+
+ c := &Command{}
+ c.SetGlobalNormalizationFunc(toUpper)
+
+ child1 := &Command{}
+ c.AddCommand(child1)
+
+ c.PersistentFlags().Bool("flagname", true, "")
+
+ child2 := &Command{}
+ c.AddCommand(child2)
+
+ inherited := child1.InheritedFlags()
+ if inherited.Lookup("flagname") == nil || inherited.Lookup("flagname") != inherited.Lookup("FLAGNAME") {
+ t.Error("Normalization function should be passed on to inherited flag set in command added before flag")
+ }
+
+ inherited = child2.InheritedFlags()
+ if inherited.Lookup("flagname") == nil || inherited.Lookup("flagname") != inherited.Lookup("FLAGNAME") {
+ t.Error("Normalization function should be passed on to inherited flag set in command added after flag")
+ }
+}
+
+// Related to https://github.com/spf13/cobra/issues/521.
+func TestConsistentNormalizedName(t *testing.T) {
+ toUpper := func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ return pflag.NormalizedName(strings.ToUpper(name))
+ }
+ n := func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ return pflag.NormalizedName(name)
+ }
+
+ c := &Command{}
+ c.Flags().Bool("flagname", true, "")
+ c.SetGlobalNormalizationFunc(toUpper)
+ c.SetGlobalNormalizationFunc(n)
+
+ if c.LocalFlags().Lookup("flagname") == c.LocalFlags().Lookup("FLAGNAME") {
+ t.Error("Normalizing flag names should not result in duplicate flags")
+ }
+}
+
+func TestFlagOnPflagCommandLine(t *testing.T) {
+ flagName := "flagOnCommandLine"
+ pflag.String(flagName, "", "about my flag")
+
+ c := &Command{Use: "c", Run: emptyRun}
+ c.AddCommand(&Command{Use: "child", Run: emptyRun})
+
+ output, _ := executeCommand(c, "--help")
+ checkStringContains(t, output, flagName)
+
+ resetCommandLineFlagSet()
+}
+
+// TestHiddenCommandExecutes checks,
+// if hidden commands run as intended.
+func TestHiddenCommandExecutes(t *testing.T) {
+ executed := false
+ c := &Command{
+ Use: "c",
+ Hidden: true,
+ Run: func(*Command, []string) { executed = true },
+ }
+
+ output, err := executeCommand(c)
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ if !executed {
+ t.Error("Hidden command should have been executed")
+ }
+}
+
+// test to ensure hidden commands do not show up in usage/help text
+func TestHiddenCommandIsHidden(t *testing.T) {
+ c := &Command{Use: "c", Hidden: true, Run: emptyRun}
+ if c.IsAvailableCommand() {
+ t.Errorf("Hidden command should be unavailable")
+ }
+}
+
+func TestCommandsAreSorted(t *testing.T) {
+ EnableCommandSorting = true
+
+ originalNames := []string{"middle", "zlast", "afirst"}
+ expectedNames := []string{"afirst", "middle", "zlast"}
+
+ var rootCmd = &Command{Use: "root"}
+
+ for _, name := range originalNames {
+ rootCmd.AddCommand(&Command{Use: name})
+ }
+
+ for i, c := range rootCmd.Commands() {
+ got := c.Name()
+ if expectedNames[i] != got {
+ t.Errorf("Expected: %s, got: %s", expectedNames[i], got)
+ }
+ }
+
+ EnableCommandSorting = true
+}
+
+func TestEnableCommandSortingIsDisabled(t *testing.T) {
+ EnableCommandSorting = false
+
+ originalNames := []string{"middle", "zlast", "afirst"}
+
+ var rootCmd = &Command{Use: "root"}
+
+ for _, name := range originalNames {
+ rootCmd.AddCommand(&Command{Use: name})
+ }
+
+ for i, c := range rootCmd.Commands() {
+ got := c.Name()
+ if originalNames[i] != got {
+ t.Errorf("expected: %s, got: %s", originalNames[i], got)
+ }
+ }
+
+ EnableCommandSorting = true
+}
+
+func TestSetOutput(t *testing.T) {
+ c := &Command{}
+ c.SetOutput(nil)
+ if out := c.OutOrStdout(); out != os.Stdout {
+ t.Errorf("Expected setting output to nil to revert back to stdout")
+ }
+}
+
+func TestFlagErrorFunc(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+
+ expectedFmt := "This is expected: %v"
+ c.SetFlagErrorFunc(func(_ *Command, err error) error {
+ return fmt.Errorf(expectedFmt, err)
+ })
+
+ _, err := executeCommand(c, "--unknown-flag")
+
+ got := err.Error()
+ expected := fmt.Sprintf(expectedFmt, "unknown flag: --unknown-flag")
+ if got != expected {
+ t.Errorf("Expected %v, got %v", expected, got)
+ }
+}
+
+// TestSortedFlags checks,
+// if cmd.LocalFlags() is unsorted when cmd.Flags().SortFlags set to false.
+// Related to https://github.com/spf13/cobra/issues/404.
+func TestSortedFlags(t *testing.T) {
+ c := &Command{}
+ c.Flags().SortFlags = false
+ names := []string{"C", "B", "A", "D"}
+ for _, name := range names {
+ c.Flags().Bool(name, false, "")
+ }
+
+ i := 0
+ c.LocalFlags().VisitAll(func(f *pflag.Flag) {
+ if i == len(names) {
+ return
+ }
+ if stringInSlice(f.Name, names) {
+ if names[i] != f.Name {
+ t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name)
+ }
+ i++
+ }
+ })
+}
+
+// TestMergeCommandLineToFlags checks,
+// if pflag.CommandLine is correctly merged to c.Flags() after first call
+// of c.mergePersistentFlags.
+// Related to https://github.com/spf13/cobra/issues/443.
+func TestMergeCommandLineToFlags(t *testing.T) {
+ pflag.Bool("boolflag", false, "")
+ c := &Command{Use: "c", Run: emptyRun}
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("boolflag") == nil {
+ t.Fatal("Expecting to have flag from CommandLine in c.Flags()")
+ }
+
+ resetCommandLineFlagSet()
+}
+
+// TestUseDeprecatedFlags checks,
+// if cobra.Execute() prints a message, if a deprecated flag is used.
+// Related to https://github.com/spf13/cobra/issues/463.
+func TestUseDeprecatedFlags(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+ c.Flags().BoolP("deprecated", "d", false, "deprecated flag")
+ c.Flags().MarkDeprecated("deprecated", "This flag is deprecated")
+
+ output, err := executeCommand(c, "c", "-d")
+ if err != nil {
+ t.Error("Unexpected error:", err)
+ }
+ checkStringContains(t, output, "This flag is deprecated")
+}
+
+func TestTraverseWithParentFlags(t *testing.T) {
+ rootCmd := &Command{Use: "root", TraverseChildren: true}
+ rootCmd.Flags().String("str", "", "")
+ rootCmd.Flags().BoolP("bool", "b", false, "")
+
+ childCmd := &Command{Use: "child"}
+ childCmd.Flags().Int("int", -1, "")
+
+ rootCmd.AddCommand(childCmd)
+
+ c, args, err := rootCmd.Traverse([]string{"-b", "--str", "ok", "child", "--int"})
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if len(args) != 1 && args[0] != "--add" {
+ t.Errorf("Wrong args: %v", args)
+ }
+ if c.Name() != childCmd.Name() {
+ t.Errorf("Expected command: %q, got: %q", childCmd.Name(), c.Name())
+ }
+}
+
+func TestTraverseNoParentFlags(t *testing.T) {
+ rootCmd := &Command{Use: "root", TraverseChildren: true}
+ rootCmd.Flags().String("foo", "", "foo things")
+
+ childCmd := &Command{Use: "child"}
+ childCmd.Flags().String("str", "", "")
+ rootCmd.AddCommand(childCmd)
+
+ c, args, err := rootCmd.Traverse([]string{"child"})
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if len(args) != 0 {
+ t.Errorf("Wrong args %v", args)
+ }
+ if c.Name() != childCmd.Name() {
+ t.Errorf("Expected command: %q, got: %q", childCmd.Name(), c.Name())
+ }
+}
+
+func TestTraverseWithBadParentFlags(t *testing.T) {
+ rootCmd := &Command{Use: "root", TraverseChildren: true}
+
+ childCmd := &Command{Use: "child"}
+ childCmd.Flags().String("str", "", "")
+ rootCmd.AddCommand(childCmd)
+
+ expected := "unknown flag: --str"
+
+ c, _, err := rootCmd.Traverse([]string{"--str", "ok", "child"})
+ if err == nil || !strings.Contains(err.Error(), expected) {
+ t.Errorf("Expected error, %q, got %q", expected, err)
+ }
+ if c != nil {
+ t.Errorf("Expected nil command")
+ }
+}
+
+func TestTraverseWithBadChildFlag(t *testing.T) {
+ rootCmd := &Command{Use: "root", TraverseChildren: true}
+ rootCmd.Flags().String("str", "", "")
+
+ childCmd := &Command{Use: "child"}
+ rootCmd.AddCommand(childCmd)
+
+ // Expect no error because the last command's args shouldn't be parsed in
+ // Traverse.
+ c, args, err := rootCmd.Traverse([]string{"child", "--str"})
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if len(args) != 1 && args[0] != "--str" {
+ t.Errorf("Wrong args: %v", args)
+ }
+ if c.Name() != childCmd.Name() {
+ t.Errorf("Expected command %q, got: %q", childCmd.Name(), c.Name())
+ }
+}
+
+func TestTraverseWithTwoSubcommands(t *testing.T) {
+ rootCmd := &Command{Use: "root", TraverseChildren: true}
+
+ subCmd := &Command{Use: "sub", TraverseChildren: true}
+ rootCmd.AddCommand(subCmd)
+
+ subsubCmd := &Command{
+ Use: "subsub",
+ }
+ subCmd.AddCommand(subsubCmd)
+
+ c, _, err := rootCmd.Traverse([]string{"sub", "subsub"})
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if c.Name() != subsubCmd.Name() {
+ t.Fatalf("Expected command: %q, got %q", subsubCmd.Name(), c.Name())
+ }
+}
+
+// TestUpdateName checks if c.Name() updates on changed c.Use.
+// Related to https://github.com/spf13/cobra/pull/422#discussion_r143918343.
+func TestUpdateName(t *testing.T) {
+ c := &Command{Use: "name xyz"}
+ originalName := c.Name()
+
+ c.Use = "changedName abc"
+ if originalName == c.Name() || c.Name() != "changedName" {
+ t.Error("c.Name() should be updated on changed c.Use")
+ }
+}
+
+type calledAsTestcase struct {
+ args []string
+ call string
+ want string
+ epm bool
+ tc bool
+}
+
+func (tc *calledAsTestcase) test(t *testing.T) {
+ defer func(ov bool) { EnablePrefixMatching = ov }(EnablePrefixMatching)
+ EnablePrefixMatching = tc.epm
+
+ var called *Command
+ run := func(c *Command, _ []string) { t.Logf("called: %q", c.Name()); called = c }
+
+ parent := &Command{Use: "parent", Run: run}
+ child1 := &Command{Use: "child1", Run: run, Aliases: []string{"this"}}
+ child2 := &Command{Use: "child2", Run: run, Aliases: []string{"that"}}
+
+ parent.AddCommand(child1)
+ parent.AddCommand(child2)
+ parent.SetArgs(tc.args)
+
+ output := new(bytes.Buffer)
+ parent.SetOutput(output)
+
+ parent.Execute()
+
+ if called == nil {
+ if tc.call != "" {
+ t.Errorf("missing expected call to command: %s", tc.call)
+ }
+ return
+ }
+
+ if called.Name() != tc.call {
+ t.Errorf("called command == %q; Wanted %q", called.Name(), tc.call)
+ } else if got := called.CalledAs(); got != tc.want {
+ t.Errorf("%s.CalledAs() == %q; Wanted: %q", tc.call, got, tc.want)
+ }
+}
+
+func TestCalledAs(t *testing.T) {
+ tests := map[string]calledAsTestcase{
+ "find/no-args": {nil, "parent", "parent", false, false},
+ "find/real-name": {[]string{"child1"}, "child1", "child1", false, false},
+ "find/full-alias": {[]string{"that"}, "child2", "that", false, false},
+ "find/part-no-prefix": {[]string{"thi"}, "", "", false, false},
+ "find/part-alias": {[]string{"thi"}, "child1", "this", true, false},
+ "find/conflict": {[]string{"th"}, "", "", true, false},
+ "traverse/no-args": {nil, "parent", "parent", false, true},
+ "traverse/real-name": {[]string{"child1"}, "child1", "child1", false, true},
+ "traverse/full-alias": {[]string{"that"}, "child2", "that", false, true},
+ "traverse/part-no-prefix": {[]string{"thi"}, "", "", false, true},
+ "traverse/part-alias": {[]string{"thi"}, "child1", "this", true, true},
+ "traverse/conflict": {[]string{"th"}, "", "", true, true},
+ }
+
+ for name, tc := range tests {
+ t.Run(name, tc.test)
+ }
+}
+
+func TestFParseErrWhitelistBackwardCompatibility(t *testing.T) {
+ c := &Command{Use: "c", Run: emptyRun}
+ c.Flags().BoolP("boola", "a", false, "a boolean flag")
+
+ output, err := executeCommand(c, "c", "-a", "--unknown", "flag")
+ if err == nil {
+ t.Error("expected unknown flag error")
+ }
+ checkStringContains(t, output, "unknown flag: --unknown")
+}
+
+func TestFParseErrWhitelistSameCommand(t *testing.T) {
+ c := &Command{
+ Use: "c",
+ Run: emptyRun,
+ FParseErrWhitelist: FParseErrWhitelist{
+ UnknownFlags: true,
+ },
+ }
+ c.Flags().BoolP("boola", "a", false, "a boolean flag")
+
+ _, err := executeCommand(c, "c", "-a", "--unknown", "flag")
+ if err != nil {
+ t.Error("unexpected error: ", err)
+ }
+}
+
+func TestFParseErrWhitelistParentCommand(t *testing.T) {
+ root := &Command{
+ Use: "root",
+ Run: emptyRun,
+ FParseErrWhitelist: FParseErrWhitelist{
+ UnknownFlags: true,
+ },
+ }
+
+ c := &Command{
+ Use: "child",
+ Run: emptyRun,
+ }
+ c.Flags().BoolP("boola", "a", false, "a boolean flag")
+
+ root.AddCommand(c)
+
+ output, err := executeCommand(root, "child", "-a", "--unknown", "flag")
+ if err == nil {
+ t.Error("expected unknown flag error")
+ }
+ checkStringContains(t, output, "unknown flag: --unknown")
+}
+
+func TestFParseErrWhitelistChildCommand(t *testing.T) {
+ root := &Command{
+ Use: "root",
+ Run: emptyRun,
+ }
+
+ c := &Command{
+ Use: "child",
+ Run: emptyRun,
+ FParseErrWhitelist: FParseErrWhitelist{
+ UnknownFlags: true,
+ },
+ }
+ c.Flags().BoolP("boola", "a", false, "a boolean flag")
+
+ root.AddCommand(c)
+
+ _, err := executeCommand(root, "child", "-a", "--unknown", "flag")
+ if err != nil {
+ t.Error("unexpected error: ", err.Error())
+ }
+}
+
+func TestFParseErrWhitelistSiblingCommand(t *testing.T) {
+ root := &Command{
+ Use: "root",
+ Run: emptyRun,
+ }
+
+ c := &Command{
+ Use: "child",
+ Run: emptyRun,
+ FParseErrWhitelist: FParseErrWhitelist{
+ UnknownFlags: true,
+ },
+ }
+ c.Flags().BoolP("boola", "a", false, "a boolean flag")
+
+ s := &Command{
+ Use: "sibling",
+ Run: emptyRun,
+ }
+ s.Flags().BoolP("boolb", "b", false, "a boolean flag")
+
+ root.AddCommand(c)
+ root.AddCommand(s)
+
+ output, err := executeCommand(root, "sibling", "-b", "--unknown", "flag")
+ if err == nil {
+ t.Error("expected unknown flag error")
+ }
+ checkStringContains(t, output, "unknown flag: --unknown")
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions_test.go b/vendor/github.com/spf13/cobra/zsh_completions_test.go
new file mode 100644
index 0000000..34e6949
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions_test.go
@@ -0,0 +1,89 @@
+package cobra
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestZshCompletion(t *testing.T) {
+ tcs := []struct {
+ name string
+ root *Command
+ expectedExpressions []string
+ }{
+ {
+ name: "trivial",
+ root: &Command{Use: "trivialapp"},
+ expectedExpressions: []string{"#compdef trivial"},
+ },
+ {
+ name: "linear",
+ root: func() *Command {
+ r := &Command{Use: "linear"}
+
+ sub1 := &Command{Use: "sub1"}
+ r.AddCommand(sub1)
+
+ sub2 := &Command{Use: "sub2"}
+ sub1.AddCommand(sub2)
+
+ sub3 := &Command{Use: "sub3"}
+ sub2.AddCommand(sub3)
+ return r
+ }(),
+ expectedExpressions: []string{"sub1", "sub2", "sub3"},
+ },
+ {
+ name: "flat",
+ root: func() *Command {
+ r := &Command{Use: "flat"}
+ r.AddCommand(&Command{Use: "c1"})
+ r.AddCommand(&Command{Use: "c2"})
+ return r
+ }(),
+ expectedExpressions: []string{"(c1 c2)"},
+ },
+ {
+ name: "tree",
+ root: func() *Command {
+ r := &Command{Use: "tree"}
+
+ sub1 := &Command{Use: "sub1"}
+ r.AddCommand(sub1)
+
+ sub11 := &Command{Use: "sub11"}
+ sub12 := &Command{Use: "sub12"}
+
+ sub1.AddCommand(sub11)
+ sub1.AddCommand(sub12)
+
+ sub2 := &Command{Use: "sub2"}
+ r.AddCommand(sub2)
+
+ sub21 := &Command{Use: "sub21"}
+ sub22 := &Command{Use: "sub22"}
+
+ sub2.AddCommand(sub21)
+ sub2.AddCommand(sub22)
+
+ return r
+ }(),
+ expectedExpressions: []string{"(sub11 sub12)", "(sub21 sub22)"},
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ buf := new(bytes.Buffer)
+ tc.root.GenZshCompletion(buf)
+ output := buf.String()
+
+ for _, expectedExpression := range tc.expectedExpressions {
+ if !strings.Contains(output, expectedExpression) {
+ t.Errorf("Expected completion to contain %q somewhere; got %q", expectedExpression, output)
+ }
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice_test.go b/vendor/github.com/spf13/pflag/bool_slice_test.go
new file mode 100644
index 0000000..b617dd2
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice_test.go
@@ -0,0 +1,215 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func setUpBSFlagSet(bsp *[]bool) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.BoolSliceVar(bsp, "bs", []bool{}, "Command separated list!")
+ return f
+}
+
+func setUpBSFlagSetWithDefault(bsp *[]bool) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.BoolSliceVar(bsp, "bs", []bool{false, true}, "Command separated list!")
+ return f
+}
+
+func TestEmptyBS(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ if len(getBS) != 0 {
+ t.Fatalf("got bs %v with len=%d but expected length=0", getBS, len(getBS))
+ }
+}
+
+func TestBS(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+
+ vals := []string{"1", "F", "TRUE", "0"}
+ arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected is[%d] to be %s but got: %t", i, vals[i], v)
+ }
+ }
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for i, v := range getBS {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %s but got: %t from GetBoolSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestBSDefault(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSetWithDefault(&bs)
+
+ vals := []string{"false", "T"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+ }
+ }
+
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ for i, v := range getBS {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+ }
+ }
+}
+
+func TestBSWithDefault(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSetWithDefault(&bs)
+
+ vals := []string{"FALSE", "1"}
+ arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t but got: %t", i, b, v)
+ }
+ }
+
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ for i, v := range getBS {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+ }
+ }
+}
+
+func TestBSCalledTwice(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+
+ in := []string{"T,F", "T"}
+ expected := []bool{true, false, true}
+ argfmt := "--bs=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ if expected[i] != v {
+ t.Fatalf("expected bs[%d] to be %t but got %t", i, expected[i], v)
+ }
+ }
+}
+
+func TestBSBadQuoting(t *testing.T) {
+
+ tests := []struct {
+ Want []bool
+ FlagArg []string
+ }{
+ {
+ Want: []bool{true, false, true},
+ FlagArg: []string{"1", "0", "true"},
+ },
+ {
+ Want: []bool{true, false},
+ FlagArg: []string{"True", "F"},
+ },
+ {
+ Want: []bool{true, false},
+ FlagArg: []string{"T", "0"},
+ },
+ {
+ Want: []bool{true, false},
+ FlagArg: []string{"1", "0"},
+ },
+ {
+ Want: []bool{true, false, false},
+ FlagArg: []string{"true,false", "false"},
+ },
+ {
+ Want: []bool{true, false, false, true, false, true, false},
+ FlagArg: []string{`"true,false,false,1,0, T"`, " false "},
+ },
+ {
+ Want: []bool{false, false, true, false, true, false, true},
+ FlagArg: []string{`"0, False, T,false , true,F"`, "true"},
+ },
+ }
+
+ for i, test := range tests {
+
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+
+ if err := f.Parse([]string{fmt.Sprintf("--bs=%s", strings.Join(test.FlagArg, ","))}); err != nil {
+ t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%#v",
+ err, test.FlagArg, test.Want[i])
+ }
+
+ for j, b := range bs {
+ if b != test.Want[j] {
+ t.Fatalf("bad value parsed for test %d on bool %d:\nwant:\t%t\ngot:\t%t", i, j, test.Want[j], b)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/bool_test.go b/vendor/github.com/spf13/pflag/bool_test.go
new file mode 100644
index 0000000..a4319e7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_test.go
@@ -0,0 +1,179 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "bytes"
+ "strconv"
+ "testing"
+)
+
+// This value can be a boolean ("true", "false") or "maybe"
+type triStateValue int
+
+const (
+ triStateFalse triStateValue = 0
+ triStateTrue triStateValue = 1
+ triStateMaybe triStateValue = 2
+)
+
+const strTriStateMaybe = "maybe"
+
+func (v *triStateValue) IsBoolFlag() bool {
+ return true
+}
+
+func (v *triStateValue) Get() interface{} {
+ return triStateValue(*v)
+}
+
+func (v *triStateValue) Set(s string) error {
+ if s == strTriStateMaybe {
+ *v = triStateMaybe
+ return nil
+ }
+ boolVal, err := strconv.ParseBool(s)
+ if boolVal {
+ *v = triStateTrue
+ } else {
+ *v = triStateFalse
+ }
+ return err
+}
+
+func (v *triStateValue) String() string {
+ if *v == triStateMaybe {
+ return strTriStateMaybe
+ }
+ return strconv.FormatBool(*v == triStateTrue)
+}
+
+// The type of the flag as required by the pflag.Value interface
+func (v *triStateValue) Type() string {
+ return "version"
+}
+
+func setUpFlagSet(tristate *triStateValue) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ *tristate = triStateFalse
+ flag := f.VarPF(tristate, "tristate", "t", "tristate value (true, maybe or false)")
+ flag.NoOptDefVal = "true"
+ return f
+}
+
+func TestExplicitTrue(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=true"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+}
+
+func TestImplicitTrue(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+}
+
+func TestShortFlag(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"-t"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+}
+
+func TestShortFlagExtraArgument(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ // The "maybe" turns into an arg, since short boolean options will only do true/false
+ err := f.Parse([]string{"-t", "maybe"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+ args := f.Args()
+ if len(args) != 1 || args[0] != "maybe" {
+ t.Fatal("expected an extra 'maybe' argument to stick around")
+ }
+}
+
+func TestExplicitMaybe(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=maybe"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateMaybe {
+ t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead")
+ }
+}
+
+func TestExplicitFalse(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=false"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateFalse {
+ t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead")
+ }
+}
+
+func TestImplicitFalse(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateFalse {
+ t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead")
+ }
+}
+
+func TestInvalidValue(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ var buf bytes.Buffer
+ f.SetOutput(&buf)
+ err := f.Parse([]string{"--tristate=invalid"})
+ if err == nil {
+ t.Fatal("expected an error but did not get any, tristate has value", tristate)
+ }
+}
+
+func TestBoolP(t *testing.T) {
+ b := BoolP("bool", "b", false, "bool value in CommandLine")
+ c := BoolP("c", "c", false, "other bool value")
+ args := []string{"--bool"}
+ if err := CommandLine.Parse(args); err != nil {
+ t.Error("expected no error, got ", err)
+ }
+ if *b != true {
+ t.Errorf("expected b=true got b=%v", *b)
+ }
+ if *c != false {
+ t.Errorf("expect c=false got c=%v", *c)
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/bytes_test.go b/vendor/github.com/spf13/pflag/bytes_test.go
new file mode 100644
index 0000000..cc4a769
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes_test.go
@@ -0,0 +1,72 @@
+package pflag
+
+import (
+ "fmt"
+ "os"
+ "testing"
+)
+
+func setUpBytesHex(bytesHex *[]byte) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.BytesHexVar(bytesHex, "bytes", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "Some bytes in HEX")
+ f.BytesHexVarP(bytesHex, "bytes2", "B", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "Some bytes in HEX")
+ return f
+}
+
+func TestBytesHex(t *testing.T) {
+ testCases := []struct {
+ input string
+ success bool
+ expected string
+ }{
+ /// Positive cases
+ {"", true, ""}, // Is empty string OK ?
+ {"01", true, "01"},
+ {"0101", true, "0101"},
+ {"1234567890abcdef", true, "1234567890ABCDEF"},
+ {"1234567890ABCDEF", true, "1234567890ABCDEF"},
+
+ // Negative cases
+ {"0", false, ""}, // Short string
+ {"000", false, ""}, /// Odd-length string
+ {"qq", false, ""}, /// non-hex character
+ }
+
+ devnull, _ := os.Open(os.DevNull)
+ os.Stderr = devnull
+
+ for i := range testCases {
+ var bytesHex []byte
+ f := setUpBytesHex(&bytesHex)
+
+ tc := &testCases[i]
+
+ // --bytes
+ args := []string{
+ fmt.Sprintf("--bytes=%s", tc.input),
+ fmt.Sprintf("-B %s", tc.input),
+ fmt.Sprintf("--bytes2=%s", tc.input),
+ }
+
+ for _, arg := range args {
+ err := f.Parse([]string{arg})
+
+ if err != nil && tc.success == true {
+ t.Errorf("expected success, got %q", err)
+ continue
+ } else if err == nil && tc.success == false {
+ // bytesHex, err := f.GetBytesHex("bytes")
+ t.Errorf("expected failure while processing %q", tc.input)
+ continue
+ } else if tc.success {
+ bytesHex, err := f.GetBytesHex("bytes")
+ if err != nil {
+ t.Errorf("Got error trying to fetch the IP flag: %v", err)
+ }
+ if fmt.Sprintf("%X", bytesHex) != tc.expected {
+ t.Errorf("expected %q, got '%X'", tc.expected, bytesHex)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/count_test.go b/vendor/github.com/spf13/pflag/count_test.go
new file mode 100644
index 0000000..3785d37
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count_test.go
@@ -0,0 +1,56 @@
+package pflag
+
+import (
+ "os"
+ "testing"
+)
+
+func setUpCount(c *int) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.CountVarP(c, "verbose", "v", "a counter")
+ return f
+}
+
+func TestCount(t *testing.T) {
+ testCases := []struct {
+ input []string
+ success bool
+ expected int
+ }{
+ {[]string{}, true, 0},
+ {[]string{"-v"}, true, 1},
+ {[]string{"-vvv"}, true, 3},
+ {[]string{"-v", "-v", "-v"}, true, 3},
+ {[]string{"-v", "--verbose", "-v"}, true, 3},
+ {[]string{"-v=3", "-v"}, true, 4},
+ {[]string{"--verbose=0"}, true, 0},
+ {[]string{"-v=0"}, true, 0},
+ {[]string{"-v=a"}, false, 0},
+ }
+
+ devnull, _ := os.Open(os.DevNull)
+ os.Stderr = devnull
+ for i := range testCases {
+ var count int
+ f := setUpCount(&count)
+
+ tc := &testCases[i]
+
+ err := f.Parse(tc.input)
+ if err != nil && tc.success == true {
+ t.Errorf("expected success, got %q", err)
+ continue
+ } else if err == nil && tc.success == false {
+ t.Errorf("expected failure, got success")
+ continue
+ } else if tc.success {
+ c, err := f.GetCount("verbose")
+ if err != nil {
+ t.Errorf("Got error trying to fetch the counter flag")
+ }
+ if c != tc.expected {
+ t.Errorf("expected %d, got %d", tc.expected, c)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice_test.go b/vendor/github.com/spf13/pflag/duration_slice_test.go
new file mode 100644
index 0000000..489b012
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice_test.go
@@ -0,0 +1,165 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+)
+
+func setUpDSFlagSet(dsp *[]time.Duration) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.DurationSliceVar(dsp, "ds", []time.Duration{}, "Command separated list!")
+ return f
+}
+
+func setUpDSFlagSetWithDefault(dsp *[]time.Duration) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.DurationSliceVar(dsp, "ds", []time.Duration{0, 1}, "Command separated list!")
+ return f
+}
+
+func TestEmptyDS(t *testing.T) {
+ var ds []time.Duration
+ f := setUpDSFlagSet(&ds)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getDS, err := f.GetDurationSlice("ds")
+ if err != nil {
+ t.Fatal("got an error from GetDurationSlice():", err)
+ }
+ if len(getDS) != 0 {
+ t.Fatalf("got ds %v with len=%d but expected length=0", getDS, len(getDS))
+ }
+}
+
+func TestDS(t *testing.T) {
+ var ds []time.Duration
+ f := setUpDSFlagSet(&ds)
+
+ vals := []string{"1ns", "2ms", "3m", "4h"}
+ arg := fmt.Sprintf("--ds=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ds {
+ d, err := time.ParseDuration(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected ds[%d] to be %s but got: %d", i, vals[i], v)
+ }
+ }
+ getDS, err := f.GetDurationSlice("ds")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for i, v := range getDS {
+ d, err := time.ParseDuration(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected ds[%d] to be %s but got: %d from GetDurationSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestDSDefault(t *testing.T) {
+ var ds []time.Duration
+ f := setUpDSFlagSetWithDefault(&ds)
+
+ vals := []string{"0s", "1ns"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ds {
+ d, err := time.ParseDuration(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected ds[%d] to be %d but got: %d", i, d, v)
+ }
+ }
+
+ getDS, err := f.GetDurationSlice("ds")
+ if err != nil {
+ t.Fatal("got an error from GetDurationSlice():", err)
+ }
+ for i, v := range getDS {
+ d, err := time.ParseDuration(vals[i])
+ if err != nil {
+ t.Fatal("got an error from GetDurationSlice():", err)
+ }
+ if d != v {
+ t.Fatalf("expected ds[%d] to be %d from GetDurationSlice but got: %d", i, d, v)
+ }
+ }
+}
+
+func TestDSWithDefault(t *testing.T) {
+ var ds []time.Duration
+ f := setUpDSFlagSetWithDefault(&ds)
+
+ vals := []string{"1ns", "2ns"}
+ arg := fmt.Sprintf("--ds=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ds {
+ d, err := time.ParseDuration(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected ds[%d] to be %d but got: %d", i, d, v)
+ }
+ }
+
+ getDS, err := f.GetDurationSlice("ds")
+ if err != nil {
+ t.Fatal("got an error from GetDurationSlice():", err)
+ }
+ for i, v := range getDS {
+ d, err := time.ParseDuration(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected ds[%d] to be %d from GetDurationSlice but got: %d", i, d, v)
+ }
+ }
+}
+
+func TestDSCalledTwice(t *testing.T) {
+ var ds []time.Duration
+ f := setUpDSFlagSet(&ds)
+
+ in := []string{"1ns,2ns", "3ns"}
+ expected := []time.Duration{1, 2, 3}
+ argfmt := "--ds=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ds {
+ if expected[i] != v {
+ t.Fatalf("expected ds[%d] to be %d but got: %d", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/example_test.go b/vendor/github.com/spf13/pflag/example_test.go
new file mode 100644
index 0000000..abd7806
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/example_test.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag_test
+
+import (
+ "fmt"
+
+ "github.com/spf13/pflag"
+)
+
+func ExampleShorthandLookup() {
+ name := "verbose"
+ short := name[:1]
+
+ pflag.BoolP(name, short, false, "verbose output")
+
+ // len(short) must be == 1
+ flag := pflag.ShorthandLookup(short)
+
+ fmt.Println(flag.Name)
+}
+
+func ExampleFlagSet_ShorthandLookup() {
+ name := "verbose"
+ short := name[:1]
+
+ fs := pflag.NewFlagSet("Example", pflag.ContinueOnError)
+ fs.BoolP(name, short, false, "verbose output")
+
+ // len(short) must be == 1
+ flag := fs.ShorthandLookup(short)
+
+ fmt.Println(flag.Name)
+}
diff --git a/vendor/github.com/spf13/pflag/export_test.go b/vendor/github.com/spf13/pflag/export_test.go
new file mode 100644
index 0000000..9318fee
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/export_test.go
@@ -0,0 +1,29 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// Additional routines compiled into the package only during testing.
+
+// ResetForTesting clears all flag state and sets the usage function as directed.
+// After calling ResetForTesting, parse errors in flag handling will not
+// exit the program.
+func ResetForTesting(usage func()) {
+ CommandLine = &FlagSet{
+ name: os.Args[0],
+ errorHandling: ContinueOnError,
+ output: ioutil.Discard,
+ }
+ Usage = usage
+}
+
+// GetCommandLine returns the default FlagSet.
+func GetCommandLine() *FlagSet {
+ return CommandLine
+}
diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go
new file mode 100644
index 0000000..f600f0a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag_test.go
@@ -0,0 +1,1259 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var (
+ testBool = Bool("test_bool", false, "bool value")
+ testInt = Int("test_int", 0, "int value")
+ testInt64 = Int64("test_int64", 0, "int64 value")
+ testUint = Uint("test_uint", 0, "uint value")
+ testUint64 = Uint64("test_uint64", 0, "uint64 value")
+ testString = String("test_string", "0", "string value")
+ testFloat = Float64("test_float64", 0, "float64 value")
+ testDuration = Duration("test_duration", 0, "time.Duration value")
+ testOptionalInt = Int("test_optional_int", 0, "optional int value")
+ normalizeFlagNameInvocations = 0
+)
+
+func boolString(s string) string {
+ if s == "0" {
+ return "false"
+ }
+ return "true"
+}
+
+func TestEverything(t *testing.T) {
+ m := make(map[string]*Flag)
+ desired := "0"
+ visitor := func(f *Flag) {
+ if len(f.Name) > 5 && f.Name[0:5] == "test_" {
+ m[f.Name] = f
+ ok := false
+ switch {
+ case f.Value.String() == desired:
+ ok = true
+ case f.Name == "test_bool" && f.Value.String() == boolString(desired):
+ ok = true
+ case f.Name == "test_duration" && f.Value.String() == desired+"s":
+ ok = true
+ }
+ if !ok {
+ t.Error("Visit: bad value", f.Value.String(), "for", f.Name)
+ }
+ }
+ }
+ VisitAll(visitor)
+ if len(m) != 9 {
+ t.Error("VisitAll misses some flags")
+ for k, v := range m {
+ t.Log(k, *v)
+ }
+ }
+ m = make(map[string]*Flag)
+ Visit(visitor)
+ if len(m) != 0 {
+ t.Errorf("Visit sees unset flags")
+ for k, v := range m {
+ t.Log(k, *v)
+ }
+ }
+ // Now set all flags
+ Set("test_bool", "true")
+ Set("test_int", "1")
+ Set("test_int64", "1")
+ Set("test_uint", "1")
+ Set("test_uint64", "1")
+ Set("test_string", "1")
+ Set("test_float64", "1")
+ Set("test_duration", "1s")
+ Set("test_optional_int", "1")
+ desired = "1"
+ Visit(visitor)
+ if len(m) != 9 {
+ t.Error("Visit fails after set")
+ for k, v := range m {
+ t.Log(k, *v)
+ }
+ }
+ // Now test they're visited in sort order.
+ var flagNames []string
+ Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) })
+ if !sort.StringsAreSorted(flagNames) {
+ t.Errorf("flag names not sorted: %v", flagNames)
+ }
+}
+
+func TestUsage(t *testing.T) {
+ called := false
+ ResetForTesting(func() { called = true })
+ if GetCommandLine().Parse([]string{"--x"}) == nil {
+ t.Error("parse did not fail for unknown flag")
+ }
+ if called {
+ t.Error("did call Usage while using ContinueOnError")
+ }
+}
+
+func TestAddFlagSet(t *testing.T) {
+ oldSet := NewFlagSet("old", ContinueOnError)
+ newSet := NewFlagSet("new", ContinueOnError)
+
+ oldSet.String("flag1", "flag1", "flag1")
+ oldSet.String("flag2", "flag2", "flag2")
+
+ newSet.String("flag2", "flag2", "flag2")
+ newSet.String("flag3", "flag3", "flag3")
+
+ oldSet.AddFlagSet(newSet)
+
+ if len(oldSet.formal) != 3 {
+ t.Errorf("Unexpected result adding a FlagSet to a FlagSet %v", oldSet)
+ }
+}
+
+func TestAnnotation(t *testing.T) {
+ f := NewFlagSet("shorthand", ContinueOnError)
+
+ if err := f.SetAnnotation("missing-flag", "key", nil); err == nil {
+ t.Errorf("Expected error setting annotation on non-existent flag")
+ }
+
+ f.StringP("stringa", "a", "", "string value")
+ if err := f.SetAnnotation("stringa", "key", nil); err != nil {
+ t.Errorf("Unexpected error setting new nil annotation: %v", err)
+ }
+ if annotation := f.Lookup("stringa").Annotations["key"]; annotation != nil {
+ t.Errorf("Unexpected annotation: %v", annotation)
+ }
+
+ f.StringP("stringb", "b", "", "string2 value")
+ if err := f.SetAnnotation("stringb", "key", []string{"value1"}); err != nil {
+ t.Errorf("Unexpected error setting new annotation: %v", err)
+ }
+ if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value1"}) {
+ t.Errorf("Unexpected annotation: %v", annotation)
+ }
+
+ if err := f.SetAnnotation("stringb", "key", []string{"value2"}); err != nil {
+ t.Errorf("Unexpected error updating annotation: %v", err)
+ }
+ if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value2"}) {
+ t.Errorf("Unexpected annotation: %v", annotation)
+ }
+}
+
+func testParse(f *FlagSet, t *testing.T) {
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ boolFlag := f.Bool("bool", false, "bool value")
+ bool2Flag := f.Bool("bool2", false, "bool2 value")
+ bool3Flag := f.Bool("bool3", false, "bool3 value")
+ intFlag := f.Int("int", 0, "int value")
+ int8Flag := f.Int8("int8", 0, "int value")
+ int16Flag := f.Int16("int16", 0, "int value")
+ int32Flag := f.Int32("int32", 0, "int value")
+ int64Flag := f.Int64("int64", 0, "int64 value")
+ uintFlag := f.Uint("uint", 0, "uint value")
+ uint8Flag := f.Uint8("uint8", 0, "uint value")
+ uint16Flag := f.Uint16("uint16", 0, "uint value")
+ uint32Flag := f.Uint32("uint32", 0, "uint value")
+ uint64Flag := f.Uint64("uint64", 0, "uint64 value")
+ stringFlag := f.String("string", "0", "string value")
+ float32Flag := f.Float32("float32", 0, "float32 value")
+ float64Flag := f.Float64("float64", 0, "float64 value")
+ ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value")
+ maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value")
+ durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value")
+ optionalIntNoValueFlag := f.Int("optional-int-no-value", 0, "int value")
+ f.Lookup("optional-int-no-value").NoOptDefVal = "9"
+ optionalIntWithValueFlag := f.Int("optional-int-with-value", 0, "int value")
+ f.Lookup("optional-int-no-value").NoOptDefVal = "9"
+ extra := "one-extra-argument"
+ args := []string{
+ "--bool",
+ "--bool2=true",
+ "--bool3=false",
+ "--int=22",
+ "--int8=-8",
+ "--int16=-16",
+ "--int32=-32",
+ "--int64=0x23",
+ "--uint", "24",
+ "--uint8=8",
+ "--uint16=16",
+ "--uint32=32",
+ "--uint64=25",
+ "--string=hello",
+ "--float32=-172e12",
+ "--float64=2718e28",
+ "--ip=10.11.12.13",
+ "--mask=255.255.255.0",
+ "--duration=2m",
+ "--optional-int-no-value",
+ "--optional-int-with-value=42",
+ extra,
+ }
+ if err := f.Parse(args); err != nil {
+ t.Fatal(err)
+ }
+ if !f.Parsed() {
+ t.Error("f.Parse() = false after Parse")
+ }
+ if *boolFlag != true {
+ t.Error("bool flag should be true, is ", *boolFlag)
+ }
+ if v, err := f.GetBool("bool"); err != nil || v != *boolFlag {
+ t.Error("GetBool does not work.")
+ }
+ if *bool2Flag != true {
+ t.Error("bool2 flag should be true, is ", *bool2Flag)
+ }
+ if *bool3Flag != false {
+ t.Error("bool3 flag should be false, is ", *bool2Flag)
+ }
+ if *intFlag != 22 {
+ t.Error("int flag should be 22, is ", *intFlag)
+ }
+ if v, err := f.GetInt("int"); err != nil || v != *intFlag {
+ t.Error("GetInt does not work.")
+ }
+ if *int8Flag != -8 {
+ t.Error("int8 flag should be 0x23, is ", *int8Flag)
+ }
+ if *int16Flag != -16 {
+ t.Error("int16 flag should be -16, is ", *int16Flag)
+ }
+ if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag {
+ t.Error("GetInt8 does not work.")
+ }
+ if v, err := f.GetInt16("int16"); err != nil || v != *int16Flag {
+ t.Error("GetInt16 does not work.")
+ }
+ if *int32Flag != -32 {
+ t.Error("int32 flag should be 0x23, is ", *int32Flag)
+ }
+ if v, err := f.GetInt32("int32"); err != nil || v != *int32Flag {
+ t.Error("GetInt32 does not work.")
+ }
+ if *int64Flag != 0x23 {
+ t.Error("int64 flag should be 0x23, is ", *int64Flag)
+ }
+ if v, err := f.GetInt64("int64"); err != nil || v != *int64Flag {
+ t.Error("GetInt64 does not work.")
+ }
+ if *uintFlag != 24 {
+ t.Error("uint flag should be 24, is ", *uintFlag)
+ }
+ if v, err := f.GetUint("uint"); err != nil || v != *uintFlag {
+ t.Error("GetUint does not work.")
+ }
+ if *uint8Flag != 8 {
+ t.Error("uint8 flag should be 8, is ", *uint8Flag)
+ }
+ if v, err := f.GetUint8("uint8"); err != nil || v != *uint8Flag {
+ t.Error("GetUint8 does not work.")
+ }
+ if *uint16Flag != 16 {
+ t.Error("uint16 flag should be 16, is ", *uint16Flag)
+ }
+ if v, err := f.GetUint16("uint16"); err != nil || v != *uint16Flag {
+ t.Error("GetUint16 does not work.")
+ }
+ if *uint32Flag != 32 {
+ t.Error("uint32 flag should be 32, is ", *uint32Flag)
+ }
+ if v, err := f.GetUint32("uint32"); err != nil || v != *uint32Flag {
+ t.Error("GetUint32 does not work.")
+ }
+ if *uint64Flag != 25 {
+ t.Error("uint64 flag should be 25, is ", *uint64Flag)
+ }
+ if v, err := f.GetUint64("uint64"); err != nil || v != *uint64Flag {
+ t.Error("GetUint64 does not work.")
+ }
+ if *stringFlag != "hello" {
+ t.Error("string flag should be `hello`, is ", *stringFlag)
+ }
+ if v, err := f.GetString("string"); err != nil || v != *stringFlag {
+ t.Error("GetString does not work.")
+ }
+ if *float32Flag != -172e12 {
+ t.Error("float32 flag should be -172e12, is ", *float32Flag)
+ }
+ if v, err := f.GetFloat32("float32"); err != nil || v != *float32Flag {
+ t.Errorf("GetFloat32 returned %v but float32Flag was %v", v, *float32Flag)
+ }
+ if *float64Flag != 2718e28 {
+ t.Error("float64 flag should be 2718e28, is ", *float64Flag)
+ }
+ if v, err := f.GetFloat64("float64"); err != nil || v != *float64Flag {
+ t.Errorf("GetFloat64 returned %v but float64Flag was %v", v, *float64Flag)
+ }
+ if !(*ipFlag).Equal(net.ParseIP("10.11.12.13")) {
+ t.Error("ip flag should be 10.11.12.13, is ", *ipFlag)
+ }
+ if v, err := f.GetIP("ip"); err != nil || !v.Equal(*ipFlag) {
+ t.Errorf("GetIP returned %v but ipFlag was %v", v, *ipFlag)
+ }
+ if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() {
+ t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String())
+ }
+ if v, err := f.GetIPv4Mask("mask"); err != nil || v.String() != (*maskFlag).String() {
+ t.Errorf("GetIP returned %v maskFlag was %v error was %v", v, *maskFlag, err)
+ }
+ if *durationFlag != 2*time.Minute {
+ t.Error("duration flag should be 2m, is ", *durationFlag)
+ }
+ if v, err := f.GetDuration("duration"); err != nil || v != *durationFlag {
+ t.Error("GetDuration does not work.")
+ }
+ if _, err := f.GetInt("duration"); err == nil {
+ t.Error("GetInt parsed a time.Duration?!?!")
+ }
+ if *optionalIntNoValueFlag != 9 {
+ t.Error("optional int flag should be the default value, is ", *optionalIntNoValueFlag)
+ }
+ if *optionalIntWithValueFlag != 42 {
+ t.Error("optional int flag should be 42, is ", *optionalIntWithValueFlag)
+ }
+ if len(f.Args()) != 1 {
+ t.Error("expected one argument, got", len(f.Args()))
+ } else if f.Args()[0] != extra {
+ t.Errorf("expected argument %q got %q", extra, f.Args()[0])
+ }
+}
+
+func testParseAll(f *FlagSet, t *testing.T) {
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ f.BoolP("boola", "a", false, "bool value")
+ f.BoolP("boolb", "b", false, "bool2 value")
+ f.BoolP("boolc", "c", false, "bool3 value")
+ f.BoolP("boold", "d", false, "bool4 value")
+ f.StringP("stringa", "s", "0", "string value")
+ f.StringP("stringz", "z", "0", "string value")
+ f.StringP("stringx", "x", "0", "string value")
+ f.StringP("stringy", "y", "0", "string value")
+ f.Lookup("stringx").NoOptDefVal = "1"
+ args := []string{
+ "-ab",
+ "-cs=xx",
+ "--stringz=something",
+ "-d=true",
+ "-x",
+ "-y",
+ "ee",
+ }
+ want := []string{
+ "boola", "true",
+ "boolb", "true",
+ "boolc", "true",
+ "stringa", "xx",
+ "stringz", "something",
+ "boold", "true",
+ "stringx", "1",
+ "stringy", "ee",
+ }
+ got := []string{}
+ store := func(flag *Flag, value string) error {
+ got = append(got, flag.Name)
+ if len(value) > 0 {
+ got = append(got, value)
+ }
+ return nil
+ }
+ if err := f.ParseAll(args, store); err != nil {
+ t.Errorf("expected no error, got %s", err)
+ }
+ if !f.Parsed() {
+ t.Errorf("f.Parse() = false after Parse")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("f.ParseAll() fail to restore the args")
+ t.Errorf("Got: %v", got)
+ t.Errorf("Want: %v", want)
+ }
+}
+
+func testParseWithUnknownFlags(f *FlagSet, t *testing.T) {
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ f.ParseErrorsWhitelist.UnknownFlags = true
+
+ f.BoolP("boola", "a", false, "bool value")
+ f.BoolP("boolb", "b", false, "bool2 value")
+ f.BoolP("boolc", "c", false, "bool3 value")
+ f.BoolP("boold", "d", false, "bool4 value")
+ f.BoolP("boole", "e", false, "bool4 value")
+ f.StringP("stringa", "s", "0", "string value")
+ f.StringP("stringz", "z", "0", "string value")
+ f.StringP("stringx", "x", "0", "string value")
+ f.StringP("stringy", "y", "0", "string value")
+ f.StringP("stringo", "o", "0", "string value")
+ f.Lookup("stringx").NoOptDefVal = "1"
+ args := []string{
+ "-ab",
+ "-cs=xx",
+ "--stringz=something",
+ "--unknown1",
+ "unknown1Value",
+ "-d=true",
+ "-x",
+ "--unknown2=unknown2Value",
+ "-u=unknown3Value",
+ "-p",
+ "unknown4Value",
+ "-q", //another unknown with bool value
+ "-y",
+ "ee",
+ "--unknown7=unknown7value",
+ "--stringo=ovalue",
+ "--unknown8=unknown8value",
+ "--boole",
+ "--unknown6",
+ }
+ want := []string{
+ "boola", "true",
+ "boolb", "true",
+ "boolc", "true",
+ "stringa", "xx",
+ "stringz", "something",
+ "boold", "true",
+ "stringx", "1",
+ "stringy", "ee",
+ "stringo", "ovalue",
+ "boole", "true",
+ }
+ got := []string{}
+ store := func(flag *Flag, value string) error {
+ got = append(got, flag.Name)
+ if len(value) > 0 {
+ got = append(got, value)
+ }
+ return nil
+ }
+ if err := f.ParseAll(args, store); err != nil {
+ t.Errorf("expected no error, got %s", err)
+ }
+ if !f.Parsed() {
+ t.Errorf("f.Parse() = false after Parse")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("f.ParseAll() fail to restore the args")
+ t.Errorf("Got: %v", got)
+ t.Errorf("Want: %v", want)
+ }
+}
+
+func TestShorthand(t *testing.T) {
+ f := NewFlagSet("shorthand", ContinueOnError)
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ boolaFlag := f.BoolP("boola", "a", false, "bool value")
+ boolbFlag := f.BoolP("boolb", "b", false, "bool2 value")
+ boolcFlag := f.BoolP("boolc", "c", false, "bool3 value")
+ booldFlag := f.BoolP("boold", "d", false, "bool4 value")
+ stringaFlag := f.StringP("stringa", "s", "0", "string value")
+ stringzFlag := f.StringP("stringz", "z", "0", "string value")
+ extra := "interspersed-argument"
+ notaflag := "--i-look-like-a-flag"
+ args := []string{
+ "-ab",
+ extra,
+ "-cs",
+ "hello",
+ "-z=something",
+ "-d=true",
+ "--",
+ notaflag,
+ }
+ f.SetOutput(ioutil.Discard)
+ if err := f.Parse(args); err != nil {
+ t.Error("expected no error, got ", err)
+ }
+ if !f.Parsed() {
+ t.Error("f.Parse() = false after Parse")
+ }
+ if *boolaFlag != true {
+ t.Error("boola flag should be true, is ", *boolaFlag)
+ }
+ if *boolbFlag != true {
+ t.Error("boolb flag should be true, is ", *boolbFlag)
+ }
+ if *boolcFlag != true {
+ t.Error("boolc flag should be true, is ", *boolcFlag)
+ }
+ if *booldFlag != true {
+ t.Error("boold flag should be true, is ", *booldFlag)
+ }
+ if *stringaFlag != "hello" {
+ t.Error("stringa flag should be `hello`, is ", *stringaFlag)
+ }
+ if *stringzFlag != "something" {
+ t.Error("stringz flag should be `something`, is ", *stringzFlag)
+ }
+ if len(f.Args()) != 2 {
+ t.Error("expected one argument, got", len(f.Args()))
+ } else if f.Args()[0] != extra {
+ t.Errorf("expected argument %q got %q", extra, f.Args()[0])
+ } else if f.Args()[1] != notaflag {
+ t.Errorf("expected argument %q got %q", notaflag, f.Args()[1])
+ }
+ if f.ArgsLenAtDash() != 1 {
+ t.Errorf("expected argsLenAtDash %d got %d", f.ArgsLenAtDash(), 1)
+ }
+}
+
+func TestShorthandLookup(t *testing.T) {
+ f := NewFlagSet("shorthand", ContinueOnError)
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ f.BoolP("boola", "a", false, "bool value")
+ f.BoolP("boolb", "b", false, "bool2 value")
+ args := []string{
+ "-ab",
+ }
+ f.SetOutput(ioutil.Discard)
+ if err := f.Parse(args); err != nil {
+ t.Error("expected no error, got ", err)
+ }
+ if !f.Parsed() {
+ t.Error("f.Parse() = false after Parse")
+ }
+ flag := f.ShorthandLookup("a")
+ if flag == nil {
+ t.Errorf("f.ShorthandLookup(\"a\") returned nil")
+ }
+ if flag.Name != "boola" {
+ t.Errorf("f.ShorthandLookup(\"a\") found %q instead of \"boola\"", flag.Name)
+ }
+ flag = f.ShorthandLookup("")
+ if flag != nil {
+ t.Errorf("f.ShorthandLookup(\"\") did not return nil")
+ }
+ defer func() {
+ recover()
+ }()
+ flag = f.ShorthandLookup("ab")
+ // should NEVER get here. lookup should panic. defer'd func should recover it.
+ t.Errorf("f.ShorthandLookup(\"ab\") did not panic")
+}
+
+func TestParse(t *testing.T) {
+ ResetForTesting(func() { t.Error("bad parse") })
+ testParse(GetCommandLine(), t)
+}
+
+func TestParseAll(t *testing.T) {
+ ResetForTesting(func() { t.Error("bad parse") })
+ testParseAll(GetCommandLine(), t)
+}
+
+func TestIgnoreUnknownFlags(t *testing.T) {
+ ResetForTesting(func() { t.Error("bad parse") })
+ testParseWithUnknownFlags(GetCommandLine(), t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+ testParse(NewFlagSet("test", ContinueOnError), t)
+}
+
+func TestChangedHelper(t *testing.T) {
+ f := NewFlagSet("changedtest", ContinueOnError)
+ f.Bool("changed", false, "changed bool")
+ f.Bool("settrue", true, "true to true")
+ f.Bool("setfalse", false, "false to false")
+ f.Bool("unchanged", false, "unchanged bool")
+
+ args := []string{"--changed", "--settrue", "--setfalse=false"}
+ if err := f.Parse(args); err != nil {
+ t.Error("f.Parse() = false after Parse")
+ }
+ if !f.Changed("changed") {
+ t.Errorf("--changed wasn't changed!")
+ }
+ if !f.Changed("settrue") {
+ t.Errorf("--settrue wasn't changed!")
+ }
+ if !f.Changed("setfalse") {
+ t.Errorf("--setfalse wasn't changed!")
+ }
+ if f.Changed("unchanged") {
+ t.Errorf("--unchanged was changed!")
+ }
+ if f.Changed("invalid") {
+ t.Errorf("--invalid was changed!")
+ }
+ if f.ArgsLenAtDash() != -1 {
+ t.Errorf("Expected argsLenAtDash: %d but got %d", -1, f.ArgsLenAtDash())
+ }
+}
+
+func replaceSeparators(name string, from []string, to string) string {
+ result := name
+ for _, sep := range from {
+ result = strings.Replace(result, sep, to, -1)
+ }
+ // Type convert to indicate normalization has been done.
+ return result
+}
+
+func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName {
+ seps := []string{"-", "_"}
+ name = replaceSeparators(name, seps, ".")
+ normalizeFlagNameInvocations++
+
+ return NormalizedName(name)
+}
+
+func testWordSepNormalizedNames(args []string, t *testing.T) {
+ f := NewFlagSet("normalized", ContinueOnError)
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ withDashFlag := f.Bool("with-dash-flag", false, "bool value")
+ // Set this after some flags have been added and before others.
+ f.SetNormalizeFunc(wordSepNormalizeFunc)
+ withUnderFlag := f.Bool("with_under_flag", false, "bool value")
+ withBothFlag := f.Bool("with-both_flag", false, "bool value")
+ if err := f.Parse(args); err != nil {
+ t.Fatal(err)
+ }
+ if !f.Parsed() {
+ t.Error("f.Parse() = false after Parse")
+ }
+ if *withDashFlag != true {
+ t.Error("withDashFlag flag should be true, is ", *withDashFlag)
+ }
+ if *withUnderFlag != true {
+ t.Error("withUnderFlag flag should be true, is ", *withUnderFlag)
+ }
+ if *withBothFlag != true {
+ t.Error("withBothFlag flag should be true, is ", *withBothFlag)
+ }
+}
+
+func TestWordSepNormalizedNames(t *testing.T) {
+ args := []string{
+ "--with-dash-flag",
+ "--with-under-flag",
+ "--with-both-flag",
+ }
+ testWordSepNormalizedNames(args, t)
+
+ args = []string{
+ "--with_dash_flag",
+ "--with_under_flag",
+ "--with_both_flag",
+ }
+ testWordSepNormalizedNames(args, t)
+
+ args = []string{
+ "--with-dash_flag",
+ "--with-under_flag",
+ "--with-both_flag",
+ }
+ testWordSepNormalizedNames(args, t)
+}
+
+func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName {
+ seps := []string{"-", "_"}
+
+ oldName := replaceSeparators("old-valid_flag", seps, ".")
+ newName := replaceSeparators("valid-flag", seps, ".")
+
+ name = replaceSeparators(name, seps, ".")
+ switch name {
+ case oldName:
+ name = newName
+ }
+
+ return NormalizedName(name)
+}
+
+func TestCustomNormalizedNames(t *testing.T) {
+ f := NewFlagSet("normalized", ContinueOnError)
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+
+ validFlag := f.Bool("valid-flag", false, "bool value")
+ f.SetNormalizeFunc(aliasAndWordSepFlagNames)
+ someOtherFlag := f.Bool("some-other-flag", false, "bool value")
+
+ args := []string{"--old_valid_flag", "--some-other_flag"}
+ if err := f.Parse(args); err != nil {
+ t.Fatal(err)
+ }
+
+ if *validFlag != true {
+ t.Errorf("validFlag is %v even though we set the alias --old_valid_falg", *validFlag)
+ }
+ if *someOtherFlag != true {
+ t.Error("someOtherFlag should be true, is ", *someOtherFlag)
+ }
+}
+
+// Every flag we add, the name (displayed also in usage) should normalized
+func TestNormalizationFuncShouldChangeFlagName(t *testing.T) {
+ // Test normalization after addition
+ f := NewFlagSet("normalized", ContinueOnError)
+
+ f.Bool("valid_flag", false, "bool value")
+ if f.Lookup("valid_flag").Name != "valid_flag" {
+ t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name)
+ }
+
+ f.SetNormalizeFunc(wordSepNormalizeFunc)
+ if f.Lookup("valid_flag").Name != "valid.flag" {
+ t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name)
+ }
+
+ // Test normalization before addition
+ f = NewFlagSet("normalized", ContinueOnError)
+ f.SetNormalizeFunc(wordSepNormalizeFunc)
+
+ f.Bool("valid_flag", false, "bool value")
+ if f.Lookup("valid_flag").Name != "valid.flag" {
+ t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name)
+ }
+}
+
+// Related to https://github.com/spf13/cobra/issues/521.
+func TestNormalizationSharedFlags(t *testing.T) {
+ f := NewFlagSet("set f", ContinueOnError)
+ g := NewFlagSet("set g", ContinueOnError)
+ nfunc := wordSepNormalizeFunc
+ testName := "valid_flag"
+ normName := nfunc(nil, testName)
+ if testName == string(normName) {
+ t.Error("TestNormalizationSharedFlags meaningless: the original and normalized flag names are identical:", testName)
+ }
+
+ f.Bool(testName, false, "bool value")
+ g.AddFlagSet(f)
+
+ f.SetNormalizeFunc(nfunc)
+ g.SetNormalizeFunc(nfunc)
+
+ if len(f.formal) != 1 {
+ t.Error("Normalizing flags should not result in duplications in the flag set:", f.formal)
+ }
+ if f.orderedFormal[0].Name != string(normName) {
+ t.Error("Flag name not normalized")
+ }
+ for k := range f.formal {
+ if k != "valid.flag" {
+ t.Errorf("The key in the flag map should have been normalized: wanted \"%s\", got \"%s\" instead", normName, k)
+ }
+ }
+
+ if !reflect.DeepEqual(f.formal, g.formal) || !reflect.DeepEqual(f.orderedFormal, g.orderedFormal) {
+ t.Error("Two flag sets sharing the same flags should stay consistent after being normalized. Original set:", f.formal, "Duplicate set:", g.formal)
+ }
+}
+
+func TestNormalizationSetFlags(t *testing.T) {
+ f := NewFlagSet("normalized", ContinueOnError)
+ nfunc := wordSepNormalizeFunc
+ testName := "valid_flag"
+ normName := nfunc(nil, testName)
+ if testName == string(normName) {
+ t.Error("TestNormalizationSetFlags meaningless: the original and normalized flag names are identical:", testName)
+ }
+
+ f.Bool(testName, false, "bool value")
+ f.Set(testName, "true")
+ f.SetNormalizeFunc(nfunc)
+
+ if len(f.formal) != 1 {
+ t.Error("Normalizing flags should not result in duplications in the flag set:", f.formal)
+ }
+ if f.orderedFormal[0].Name != string(normName) {
+ t.Error("Flag name not normalized")
+ }
+ for k := range f.formal {
+ if k != "valid.flag" {
+ t.Errorf("The key in the flag map should have been normalized: wanted \"%s\", got \"%s\" instead", normName, k)
+ }
+ }
+
+ if !reflect.DeepEqual(f.formal, f.actual) {
+ t.Error("The map of set flags should get normalized. Formal:", f.formal, "Actual:", f.actual)
+ }
+}
+
+// Declare a user-defined flag type.
+type flagVar []string
+
+func (f *flagVar) String() string {
+ return fmt.Sprint([]string(*f))
+}
+
+func (f *flagVar) Set(value string) error {
+ *f = append(*f, value)
+ return nil
+}
+
+func (f *flagVar) Type() string {
+ return "flagVar"
+}
+
+func TestUserDefined(t *testing.T) {
+ var flags FlagSet
+ flags.Init("test", ContinueOnError)
+ var v flagVar
+ flags.VarP(&v, "v", "v", "usage")
+ if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil {
+ t.Error(err)
+ }
+ if len(v) != 3 {
+ t.Fatal("expected 3 args; got ", len(v))
+ }
+ expect := "[1 2 3]"
+ if v.String() != expect {
+ t.Errorf("expected value %q got %q", expect, v.String())
+ }
+}
+
+func TestSetOutput(t *testing.T) {
+ var flags FlagSet
+ var buf bytes.Buffer
+ flags.SetOutput(&buf)
+ flags.Init("test", ContinueOnError)
+ flags.Parse([]string{"--unknown"})
+ if out := buf.String(); !strings.Contains(out, "--unknown") {
+ t.Logf("expected output mentioning unknown; got %q", out)
+ }
+}
+
+// This tests that one can reset the flags. This still works but not well, and is
+// superseded by FlagSet.
+func TestChangingArgs(t *testing.T) {
+ ResetForTesting(func() { t.Fatal("bad parse") })
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+ os.Args = []string{"cmd", "--before", "subcmd"}
+ before := Bool("before", false, "")
+ if err := GetCommandLine().Parse(os.Args[1:]); err != nil {
+ t.Fatal(err)
+ }
+ cmd := Arg(0)
+ os.Args = []string{"subcmd", "--after", "args"}
+ after := Bool("after", false, "")
+ Parse()
+ args := Args()
+
+ if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
+ t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
+ }
+}
+
+// Test that -help invokes the usage message and returns ErrHelp.
+func TestHelp(t *testing.T) {
+ var helpCalled = false
+ fs := NewFlagSet("help test", ContinueOnError)
+ fs.Usage = func() { helpCalled = true }
+ var flag bool
+ fs.BoolVar(&flag, "flag", false, "regular flag")
+ // Regular flag invocation should work
+ err := fs.Parse([]string{"--flag=true"})
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+ if !flag {
+ t.Error("flag was not set by --flag")
+ }
+ if helpCalled {
+ t.Error("help called for regular flag")
+ helpCalled = false // reset for next test
+ }
+ // Help flag should work as expected.
+ err = fs.Parse([]string{"--help"})
+ if err == nil {
+ t.Fatal("error expected")
+ }
+ if err != ErrHelp {
+ t.Fatal("expected ErrHelp; got ", err)
+ }
+ if !helpCalled {
+ t.Fatal("help was not called")
+ }
+ // If we define a help flag, that should override.
+ var help bool
+ fs.BoolVar(&help, "help", false, "help flag")
+ helpCalled = false
+ err = fs.Parse([]string{"--help"})
+ if err != nil {
+ t.Fatal("expected no error for defined --help; got ", err)
+ }
+ if helpCalled {
+ t.Fatal("help was called; should not have been for defined help flag")
+ }
+}
+
+func TestNoInterspersed(t *testing.T) {
+ f := NewFlagSet("test", ContinueOnError)
+ f.SetInterspersed(false)
+ f.Bool("true", true, "always true")
+ f.Bool("false", false, "always false")
+ err := f.Parse([]string{"--true", "break", "--false"})
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+ args := f.Args()
+ if len(args) != 2 || args[0] != "break" || args[1] != "--false" {
+ t.Fatal("expected interspersed options/non-options to fail")
+ }
+}
+
+func TestTermination(t *testing.T) {
+ f := NewFlagSet("termination", ContinueOnError)
+ boolFlag := f.BoolP("bool", "l", false, "bool value")
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ arg1 := "ls"
+ arg2 := "-l"
+ args := []string{
+ "--",
+ arg1,
+ arg2,
+ }
+ f.SetOutput(ioutil.Discard)
+ if err := f.Parse(args); err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+ if !f.Parsed() {
+ t.Error("f.Parse() = false after Parse")
+ }
+ if *boolFlag {
+ t.Error("expected boolFlag=false, got true")
+ }
+ if len(f.Args()) != 2 {
+ t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args())
+ }
+ if f.Args()[0] != arg1 {
+ t.Errorf("expected argument %q got %q", arg1, f.Args()[0])
+ }
+ if f.Args()[1] != arg2 {
+ t.Errorf("expected argument %q got %q", arg2, f.Args()[1])
+ }
+ if f.ArgsLenAtDash() != 0 {
+ t.Errorf("expected argsLenAtDash %d got %d", 0, f.ArgsLenAtDash())
+ }
+}
+
+func getDeprecatedFlagSet() *FlagSet {
+ f := NewFlagSet("bob", ContinueOnError)
+ f.Bool("badflag", true, "always true")
+ f.MarkDeprecated("badflag", "use --good-flag instead")
+ return f
+}
+func TestDeprecatedFlagInDocs(t *testing.T) {
+ f := getDeprecatedFlagSet()
+
+ out := new(bytes.Buffer)
+ f.SetOutput(out)
+ f.PrintDefaults()
+
+ if strings.Contains(out.String(), "badflag") {
+ t.Errorf("found deprecated flag in usage!")
+ }
+}
+
+func TestUnHiddenDeprecatedFlagInDocs(t *testing.T) {
+ f := getDeprecatedFlagSet()
+ flg := f.Lookup("badflag")
+ if flg == nil {
+ t.Fatalf("Unable to lookup 'bob' in TestUnHiddenDeprecatedFlagInDocs")
+ }
+ flg.Hidden = false
+
+ out := new(bytes.Buffer)
+ f.SetOutput(out)
+ f.PrintDefaults()
+
+ defaults := out.String()
+ if !strings.Contains(defaults, "badflag") {
+ t.Errorf("Did not find deprecated flag in usage!")
+ }
+ if !strings.Contains(defaults, "use --good-flag instead") {
+ t.Errorf("Did not find 'use --good-flag instead' in defaults")
+ }
+}
+
+func TestDeprecatedFlagShorthandInDocs(t *testing.T) {
+ f := NewFlagSet("bob", ContinueOnError)
+ name := "noshorthandflag"
+ f.BoolP(name, "n", true, "always true")
+ f.MarkShorthandDeprecated("noshorthandflag", fmt.Sprintf("use --%s instead", name))
+
+ out := new(bytes.Buffer)
+ f.SetOutput(out)
+ f.PrintDefaults()
+
+ if strings.Contains(out.String(), "-n,") {
+ t.Errorf("found deprecated flag shorthand in usage!")
+ }
+}
+
+func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) {
+ oldStderr := os.Stderr
+ r, w, _ := os.Pipe()
+ os.Stderr = w
+
+ err := f.Parse(args)
+
+ outC := make(chan string)
+ // copy the output in a separate goroutine so printing can't block indefinitely
+ go func() {
+ var buf bytes.Buffer
+ io.Copy(&buf, r)
+ outC <- buf.String()
+ }()
+
+ w.Close()
+ os.Stderr = oldStderr
+ out := <-outC
+
+ return out, err
+}
+
+func TestDeprecatedFlagUsage(t *testing.T) {
+ f := NewFlagSet("bob", ContinueOnError)
+ f.Bool("badflag", true, "always true")
+ usageMsg := "use --good-flag instead"
+ f.MarkDeprecated("badflag", usageMsg)
+
+ args := []string{"--badflag"}
+ out, err := parseReturnStderr(t, f, args)
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+
+ if !strings.Contains(out, usageMsg) {
+ t.Errorf("usageMsg not printed when using a deprecated flag!")
+ }
+}
+
+func TestDeprecatedFlagShorthandUsage(t *testing.T) {
+ f := NewFlagSet("bob", ContinueOnError)
+ name := "noshorthandflag"
+ f.BoolP(name, "n", true, "always true")
+ usageMsg := fmt.Sprintf("use --%s instead", name)
+ f.MarkShorthandDeprecated(name, usageMsg)
+
+ args := []string{"-n"}
+ out, err := parseReturnStderr(t, f, args)
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+
+ if !strings.Contains(out, usageMsg) {
+ t.Errorf("usageMsg not printed when using a deprecated flag!")
+ }
+}
+
+func TestDeprecatedFlagUsageNormalized(t *testing.T) {
+ f := NewFlagSet("bob", ContinueOnError)
+ f.Bool("bad-double_flag", true, "always true")
+ f.SetNormalizeFunc(wordSepNormalizeFunc)
+ usageMsg := "use --good-flag instead"
+ f.MarkDeprecated("bad_double-flag", usageMsg)
+
+ args := []string{"--bad_double_flag"}
+ out, err := parseReturnStderr(t, f, args)
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+
+ if !strings.Contains(out, usageMsg) {
+ t.Errorf("usageMsg not printed when using a deprecated flag!")
+ }
+}
+
+// Name normalization function should be called only once on flag addition
+func TestMultipleNormalizeFlagNameInvocations(t *testing.T) {
+ normalizeFlagNameInvocations = 0
+
+ f := NewFlagSet("normalized", ContinueOnError)
+ f.SetNormalizeFunc(wordSepNormalizeFunc)
+ f.Bool("with_under_flag", false, "bool value")
+
+ if normalizeFlagNameInvocations != 1 {
+ t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", normalizeFlagNameInvocations)
+ }
+}
+
+//
+func TestHiddenFlagInUsage(t *testing.T) {
+ f := NewFlagSet("bob", ContinueOnError)
+ f.Bool("secretFlag", true, "shhh")
+ f.MarkHidden("secretFlag")
+
+ out := new(bytes.Buffer)
+ f.SetOutput(out)
+ f.PrintDefaults()
+
+ if strings.Contains(out.String(), "secretFlag") {
+ t.Errorf("found hidden flag in usage!")
+ }
+}
+
+//
+func TestHiddenFlagUsage(t *testing.T) {
+ f := NewFlagSet("bob", ContinueOnError)
+ f.Bool("secretFlag", true, "shhh")
+ f.MarkHidden("secretFlag")
+
+ args := []string{"--secretFlag"}
+ out, err := parseReturnStderr(t, f, args)
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+
+ if strings.Contains(out, "shhh") {
+ t.Errorf("usage message printed when using a hidden flag!")
+ }
+}
+
+const defaultOutput = ` --A for bootstrapping, allow 'any' type
+ --Alongflagname disable bounds checking
+ -C, --CCC a boolean defaulting to true (default true)
+ --D path set relative path for local imports
+ -E, --EEE num[=1234] a num with NoOptDefVal (default 4321)
+ --F number a non-zero number (default 2.7)
+ --G float a float that defaults to zero
+ --IP ip IP address with no default
+ --IPMask ipMask Netmask address with no default
+ --IPNet ipNet IP network with no default
+ --Ints ints int slice with zero default
+ --N int a non-zero int (default 27)
+ --ND1 string[="bar"] a string with NoOptDefVal (default "foo")
+ --ND2 num[=4321] a num with NoOptDefVal (default 1234)
+ --StringArray stringArray string array with zero default
+ --StringSlice strings string slice with zero default
+ --Z int an int that defaults to zero
+ --custom custom custom Value implementation
+ --customP custom a VarP with default (default 10)
+ --maxT timeout set timeout for dial
+ -v, --verbose count verbosity
+`
+
+// Custom value that satisfies the Value interface.
+type customValue int
+
+func (cv *customValue) String() string { return fmt.Sprintf("%v", *cv) }
+
+func (cv *customValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *cv = customValue(v)
+ return err
+}
+
+func (cv *customValue) Type() string { return "custom" }
+
+func TestPrintDefaults(t *testing.T) {
+ fs := NewFlagSet("print defaults test", ContinueOnError)
+ var buf bytes.Buffer
+ fs.SetOutput(&buf)
+ fs.Bool("A", false, "for bootstrapping, allow 'any' type")
+ fs.Bool("Alongflagname", false, "disable bounds checking")
+ fs.BoolP("CCC", "C", true, "a boolean defaulting to true")
+ fs.String("D", "", "set relative `path` for local imports")
+ fs.Float64("F", 2.7, "a non-zero `number`")
+ fs.Float64("G", 0, "a float that defaults to zero")
+ fs.Int("N", 27, "a non-zero int")
+ fs.IntSlice("Ints", []int{}, "int slice with zero default")
+ fs.IP("IP", nil, "IP address with no default")
+ fs.IPMask("IPMask", nil, "Netmask address with no default")
+ fs.IPNet("IPNet", net.IPNet{}, "IP network with no default")
+ fs.Int("Z", 0, "an int that defaults to zero")
+ fs.Duration("maxT", 0, "set `timeout` for dial")
+ fs.String("ND1", "foo", "a string with NoOptDefVal")
+ fs.Lookup("ND1").NoOptDefVal = "bar"
+ fs.Int("ND2", 1234, "a `num` with NoOptDefVal")
+ fs.Lookup("ND2").NoOptDefVal = "4321"
+ fs.IntP("EEE", "E", 4321, "a `num` with NoOptDefVal")
+ fs.ShorthandLookup("E").NoOptDefVal = "1234"
+ fs.StringSlice("StringSlice", []string{}, "string slice with zero default")
+ fs.StringArray("StringArray", []string{}, "string array with zero default")
+ fs.CountP("verbose", "v", "verbosity")
+
+ var cv customValue
+ fs.Var(&cv, "custom", "custom Value implementation")
+
+ cv2 := customValue(10)
+ fs.VarP(&cv2, "customP", "", "a VarP with default")
+
+ fs.PrintDefaults()
+ got := buf.String()
+ if got != defaultOutput {
+ fmt.Println("\n" + got)
+ fmt.Println("\n" + defaultOutput)
+ t.Errorf("got %q want %q\n", got, defaultOutput)
+ }
+}
+
+func TestVisitAllFlagOrder(t *testing.T) {
+ fs := NewFlagSet("TestVisitAllFlagOrder", ContinueOnError)
+ fs.SortFlags = false
+ // https://github.com/spf13/pflag/issues/120
+ fs.SetNormalizeFunc(func(f *FlagSet, name string) NormalizedName {
+ return NormalizedName(name)
+ })
+
+ names := []string{"C", "B", "A", "D"}
+ for _, name := range names {
+ fs.Bool(name, false, "")
+ }
+
+ i := 0
+ fs.VisitAll(func(f *Flag) {
+ if names[i] != f.Name {
+ t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name)
+ }
+ i++
+ })
+}
+
+func TestVisitFlagOrder(t *testing.T) {
+ fs := NewFlagSet("TestVisitFlagOrder", ContinueOnError)
+ fs.SortFlags = false
+ names := []string{"C", "B", "A", "D"}
+ for _, name := range names {
+ fs.Bool(name, false, "")
+ fs.Set(name, "true")
+ }
+
+ i := 0
+ fs.Visit(func(f *Flag) {
+ if names[i] != f.Name {
+ t.Errorf("Incorrect order. Expected %v, got %v", names[i], f.Name)
+ }
+ i++
+ })
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag_test.go b/vendor/github.com/spf13/pflag/golangflag_test.go
new file mode 100644
index 0000000..5bd831b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag_test.go
@@ -0,0 +1,47 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "testing"
+)
+
+func TestGoflags(t *testing.T) {
+ goflag.String("stringFlag", "stringFlag", "stringFlag")
+ goflag.Bool("boolFlag", false, "boolFlag")
+
+ f := NewFlagSet("test", ContinueOnError)
+
+ f.AddGoFlagSet(goflag.CommandLine)
+ err := f.Parse([]string{"--stringFlag=bob", "--boolFlag"})
+ if err != nil {
+ t.Fatal("expected no error; get", err)
+ }
+
+ getString, err := f.GetString("stringFlag")
+ if err != nil {
+ t.Fatal("expected no error; get", err)
+ }
+ if getString != "bob" {
+ t.Fatalf("expected getString=bob but got getString=%s", getString)
+ }
+
+ getBool, err := f.GetBool("boolFlag")
+ if err != nil {
+ t.Fatal("expected no error; get", err)
+ }
+ if getBool != true {
+ t.Fatalf("expected getBool=true but got getBool=%v", getBool)
+ }
+ if !f.Parsed() {
+ t.Fatal("f.Parsed() return false after f.Parse() called")
+ }
+
+ // in fact it is useless. because `go test` called flag.Parse()
+ if !goflag.CommandLine.Parsed() {
+ t.Fatal("goflag.CommandLine.Parsed() return false after f.Parse() called")
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice_test.go b/vendor/github.com/spf13/pflag/int_slice_test.go
new file mode 100644
index 0000000..745aecb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice_test.go
@@ -0,0 +1,165 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func setUpISFlagSet(isp *[]int) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IntSliceVar(isp, "is", []int{}, "Command separated list!")
+ return f
+}
+
+func setUpISFlagSetWithDefault(isp *[]int) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IntSliceVar(isp, "is", []int{0, 1}, "Command separated list!")
+ return f
+}
+
+func TestEmptyIS(t *testing.T) {
+ var is []int
+ f := setUpISFlagSet(&is)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getIS, err := f.GetIntSlice("is")
+ if err != nil {
+ t.Fatal("got an error from GetIntSlice():", err)
+ }
+ if len(getIS) != 0 {
+ t.Fatalf("got is %v with len=%d but expected length=0", getIS, len(getIS))
+ }
+}
+
+func TestIS(t *testing.T) {
+ var is []int
+ f := setUpISFlagSet(&is)
+
+ vals := []string{"1", "2", "4", "3"}
+ arg := fmt.Sprintf("--is=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range is {
+ d, err := strconv.Atoi(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v)
+ }
+ }
+ getIS, err := f.GetIntSlice("is")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for i, v := range getIS {
+ d, err := strconv.Atoi(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected is[%d] to be %s but got: %d from GetIntSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestISDefault(t *testing.T) {
+ var is []int
+ f := setUpISFlagSetWithDefault(&is)
+
+ vals := []string{"0", "1"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range is {
+ d, err := strconv.Atoi(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v)
+ }
+ }
+
+ getIS, err := f.GetIntSlice("is")
+ if err != nil {
+ t.Fatal("got an error from GetIntSlice():", err)
+ }
+ for i, v := range getIS {
+ d, err := strconv.Atoi(vals[i])
+ if err != nil {
+ t.Fatal("got an error from GetIntSlice():", err)
+ }
+ if d != v {
+ t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v)
+ }
+ }
+}
+
+func TestISWithDefault(t *testing.T) {
+ var is []int
+ f := setUpISFlagSetWithDefault(&is)
+
+ vals := []string{"1", "2"}
+ arg := fmt.Sprintf("--is=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range is {
+ d, err := strconv.Atoi(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v)
+ }
+ }
+
+ getIS, err := f.GetIntSlice("is")
+ if err != nil {
+ t.Fatal("got an error from GetIntSlice():", err)
+ }
+ for i, v := range getIS {
+ d, err := strconv.Atoi(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if d != v {
+ t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v)
+ }
+ }
+}
+
+func TestISCalledTwice(t *testing.T) {
+ var is []int
+ f := setUpISFlagSet(&is)
+
+ in := []string{"1,2", "3"}
+ expected := []int{1, 2, 3}
+ argfmt := "--is=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range is {
+ if expected[i] != v {
+ t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/ip_slice_test.go b/vendor/github.com/spf13/pflag/ip_slice_test.go
new file mode 100644
index 0000000..b0c681c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice_test.go
@@ -0,0 +1,222 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "testing"
+)
+
+func setUpIPSFlagSet(ipsp *[]net.IP) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IPSliceVar(ipsp, "ips", []net.IP{}, "Command separated list!")
+ return f
+}
+
+func setUpIPSFlagSetWithDefault(ipsp *[]net.IP) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IPSliceVar(ipsp, "ips",
+ []net.IP{
+ net.ParseIP("192.168.1.1"),
+ net.ParseIP("0:0:0:0:0:0:0:1"),
+ },
+ "Command separated list!")
+ return f
+}
+
+func TestEmptyIP(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getIPS, err := f.GetIPSlice("ips")
+ if err != nil {
+ t.Fatal("got an error from GetIPSlice():", err)
+ }
+ if len(getIPS) != 0 {
+ t.Fatalf("got ips %v with len=%d but expected length=0", getIPS, len(getIPS))
+ }
+}
+
+func TestIPS(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+
+ vals := []string{"192.168.1.1", "10.0.0.1", "0:0:0:0:0:0:0:2"}
+ arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s from GetIPSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestIPSDefault(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSetWithDefault(&ips)
+
+ vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"}
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getIPS, err := f.GetIPSlice("ips")
+ if err != nil {
+ t.Fatal("got an error from GetIPSlice")
+ }
+ for i, v := range getIPS {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestIPSWithDefault(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSetWithDefault(&ips)
+
+ vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"}
+ arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getIPS, err := f.GetIPSlice("ips")
+ if err != nil {
+ t.Fatal("got an error from GetIPSlice")
+ }
+ for i, v := range getIPS {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestIPSCalledTwice(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+
+ in := []string{"192.168.1.2,0:0:0:0:0:0:0:1", "10.0.0.1"}
+ expected := []net.IP{net.ParseIP("192.168.1.2"), net.ParseIP("0:0:0:0:0:0:0:1"), net.ParseIP("10.0.0.1")}
+ argfmt := "ips=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if !expected[i].Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
+
+func TestIPSBadQuoting(t *testing.T) {
+
+ tests := []struct {
+ Want []net.IP
+ FlagArg []string
+ }{
+ {
+ Want: []net.IP{
+ net.ParseIP("a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568"),
+ net.ParseIP("203.107.49.208"),
+ net.ParseIP("14.57.204.90"),
+ },
+ FlagArg: []string{
+ "a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568",
+ "203.107.49.208",
+ "14.57.204.90",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("204.228.73.195"),
+ net.ParseIP("86.141.15.94"),
+ },
+ FlagArg: []string{
+ "204.228.73.195",
+ "86.141.15.94",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f"),
+ net.ParseIP("4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472"),
+ },
+ FlagArg: []string{
+ "c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f",
+ "4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("5170:f971:cfac:7be3:512a:af37:952c:bc33"),
+ net.ParseIP("93.21.145.140"),
+ net.ParseIP("2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca"),
+ },
+ FlagArg: []string{
+ " 5170:f971:cfac:7be3:512a:af37:952c:bc33 , 93.21.145.140 ",
+ "2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ },
+ FlagArg: []string{
+ `"2e5e:66b2:6441:848:5b74:76ea:574c:3a7b, 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b,2e5e:66b2:6441:848:5b74:76ea:574c:3a7b "`,
+ " 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"},
+ },
+ }
+
+ for i, test := range tests {
+
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+
+ if err := f.Parse([]string{fmt.Sprintf("--ips=%s", strings.Join(test.FlagArg, ","))}); err != nil {
+ t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%s",
+ err, test.FlagArg, test.Want[i])
+ }
+
+ for j, b := range ips {
+ if !b.Equal(test.Want[j]) {
+ t.Fatalf("bad value parsed for test %d on net.IP %d:\nwant:\t%s\ngot:\t%s", i, j, test.Want[j], b)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/ip_test.go b/vendor/github.com/spf13/pflag/ip_test.go
new file mode 100644
index 0000000..1fec50e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_test.go
@@ -0,0 +1,63 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "testing"
+)
+
+func setUpIP(ip *net.IP) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IPVar(ip, "address", net.ParseIP("0.0.0.0"), "IP Address")
+ return f
+}
+
+func TestIP(t *testing.T) {
+ testCases := []struct {
+ input string
+ success bool
+ expected string
+ }{
+ {"0.0.0.0", true, "0.0.0.0"},
+ {" 0.0.0.0 ", true, "0.0.0.0"},
+ {"1.2.3.4", true, "1.2.3.4"},
+ {"127.0.0.1", true, "127.0.0.1"},
+ {"255.255.255.255", true, "255.255.255.255"},
+ {"", false, ""},
+ {"0", false, ""},
+ {"localhost", false, ""},
+ {"0.0.0", false, ""},
+ {"0.0.0.", false, ""},
+ {"0.0.0.0.", false, ""},
+ {"0.0.0.256", false, ""},
+ {"0 . 0 . 0 . 0", false, ""},
+ }
+
+ devnull, _ := os.Open(os.DevNull)
+ os.Stderr = devnull
+ for i := range testCases {
+ var addr net.IP
+ f := setUpIP(&addr)
+
+ tc := &testCases[i]
+
+ arg := fmt.Sprintf("--address=%s", tc.input)
+ err := f.Parse([]string{arg})
+ if err != nil && tc.success == true {
+ t.Errorf("expected success, got %q", err)
+ continue
+ } else if err == nil && tc.success == false {
+ t.Errorf("expected failure")
+ continue
+ } else if tc.success {
+ ip, err := f.GetIP("address")
+ if err != nil {
+ t.Errorf("Got error trying to fetch the IP flag: %v", err)
+ }
+ if ip.String() != tc.expected {
+ t.Errorf("expected %q, got %q", tc.expected, ip.String())
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/ipnet_test.go b/vendor/github.com/spf13/pflag/ipnet_test.go
new file mode 100644
index 0000000..335b6fa
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet_test.go
@@ -0,0 +1,70 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "testing"
+)
+
+func setUpIPNet(ip *net.IPNet) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ _, def, _ := net.ParseCIDR("0.0.0.0/0")
+ f.IPNetVar(ip, "address", *def, "IP Address")
+ return f
+}
+
+func TestIPNet(t *testing.T) {
+ testCases := []struct {
+ input string
+ success bool
+ expected string
+ }{
+ {"0.0.0.0/0", true, "0.0.0.0/0"},
+ {" 0.0.0.0/0 ", true, "0.0.0.0/0"},
+ {"1.2.3.4/8", true, "1.0.0.0/8"},
+ {"127.0.0.1/16", true, "127.0.0.0/16"},
+ {"255.255.255.255/19", true, "255.255.224.0/19"},
+ {"255.255.255.255/32", true, "255.255.255.255/32"},
+ {"", false, ""},
+ {"/0", false, ""},
+ {"0", false, ""},
+ {"0/0", false, ""},
+ {"localhost/0", false, ""},
+ {"0.0.0/4", false, ""},
+ {"0.0.0./8", false, ""},
+ {"0.0.0.0./12", false, ""},
+ {"0.0.0.256/16", false, ""},
+ {"0.0.0.0 /20", false, ""},
+ {"0.0.0.0/ 24", false, ""},
+ {"0 . 0 . 0 . 0 / 28", false, ""},
+ {"0.0.0.0/33", false, ""},
+ }
+
+ devnull, _ := os.Open(os.DevNull)
+ os.Stderr = devnull
+ for i := range testCases {
+ var addr net.IPNet
+ f := setUpIPNet(&addr)
+
+ tc := &testCases[i]
+
+ arg := fmt.Sprintf("--address=%s", tc.input)
+ err := f.Parse([]string{arg})
+ if err != nil && tc.success == true {
+ t.Errorf("expected success, got %q", err)
+ continue
+ } else if err == nil && tc.success == false {
+ t.Errorf("expected failure")
+ continue
+ } else if tc.success {
+ ip, err := f.GetIPNet("address")
+ if err != nil {
+ t.Errorf("Got error trying to fetch the IP flag: %v", err)
+ }
+ if ip.String() != tc.expected {
+ t.Errorf("expected %q, got %q", tc.expected, ip.String())
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/printusage_test.go b/vendor/github.com/spf13/pflag/printusage_test.go
new file mode 100644
index 0000000..df982aa
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/printusage_test.go
@@ -0,0 +1,74 @@
+package pflag
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+const expectedOutput = ` --long-form Some description
+ --long-form2 Some description
+ with multiline
+ -s, --long-name Some description
+ -t, --long-name2 Some description with
+ multiline
+`
+
+func setUpPFlagSet(buf io.Writer) *FlagSet {
+ f := NewFlagSet("test", ExitOnError)
+ f.Bool("long-form", false, "Some description")
+ f.Bool("long-form2", false, "Some description\n with multiline")
+ f.BoolP("long-name", "s", false, "Some description")
+ f.BoolP("long-name2", "t", false, "Some description with\n multiline")
+ f.SetOutput(buf)
+ return f
+}
+
+func TestPrintUsage(t *testing.T) {
+ buf := bytes.Buffer{}
+ f := setUpPFlagSet(&buf)
+ f.PrintDefaults()
+ res := buf.String()
+ if res != expectedOutput {
+ t.Errorf("Expected \n%s \nActual \n%s", expectedOutput, res)
+ }
+}
+
+func setUpPFlagSet2(buf io.Writer) *FlagSet {
+ f := NewFlagSet("test", ExitOnError)
+ f.Bool("long-form", false, "Some description")
+ f.Bool("long-form2", false, "Some description\n with multiline")
+ f.BoolP("long-name", "s", false, "Some description")
+ f.BoolP("long-name2", "t", false, "Some description with\n multiline")
+ f.StringP("some-very-long-arg", "l", "test", "Some very long description having break the limit")
+ f.StringP("other-very-long-arg", "o", "long-default-value", "Some very long description having break the limit")
+ f.String("some-very-long-arg2", "very long default value", "Some very long description\nwith line break\nmultiple")
+ f.SetOutput(buf)
+ return f
+}
+
+const expectedOutput2 = ` --long-form Some description
+ --long-form2 Some description
+ with multiline
+ -s, --long-name Some description
+ -t, --long-name2 Some description with
+ multiline
+ -o, --other-very-long-arg string Some very long description having
+ break the limit (default
+ "long-default-value")
+ -l, --some-very-long-arg string Some very long description having
+ break the limit (default "test")
+ --some-very-long-arg2 string Some very long description
+ with line break
+ multiple (default "very long default
+ value")
+`
+
+func TestPrintUsage_2(t *testing.T) {
+ buf := bytes.Buffer{}
+ f := setUpPFlagSet2(&buf)
+ res := f.FlagUsagesWrapped(80)
+ if res != expectedOutput2 {
+ t.Errorf("Expected \n%q \nActual \n%q", expectedOutput2, res)
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/string_array_test.go b/vendor/github.com/spf13/pflag/string_array_test.go
new file mode 100644
index 0000000..1ceac8c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array_test.go
@@ -0,0 +1,233 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "fmt"
+ "testing"
+)
+
+func setUpSAFlagSet(sap *[]string) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringArrayVar(sap, "sa", []string{}, "Command separated list!")
+ return f
+}
+
+func setUpSAFlagSetWithDefault(sap *[]string) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringArrayVar(sap, "sa", []string{"default", "values"}, "Command separated list!")
+ return f
+}
+
+func TestEmptySA(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSet(&sa)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getSA, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("got an error from GetStringArray():", err)
+ }
+ if len(getSA) != 0 {
+ t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA))
+ }
+}
+
+func TestEmptySAValue(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSet(&sa)
+ err := f.Parse([]string{"--sa="})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getSA, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("got an error from GetStringArray():", err)
+ }
+ if len(getSA) != 0 {
+ t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA))
+ }
+}
+
+func TestSADefault(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSetWithDefault(&sa)
+
+ vals := []string{"default", "values"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range sa {
+ if vals[i] != v {
+ t.Fatalf("expected sa[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getSA, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("got an error from GetStringArray():", err)
+ }
+ for i, v := range getSA {
+ if vals[i] != v {
+ t.Fatalf("expected sa[%d] to be %s from GetStringArray but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestSAWithDefault(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSetWithDefault(&sa)
+
+ val := "one"
+ arg := fmt.Sprintf("--sa=%s", val)
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(sa) != 1 {
+ t.Fatalf("expected number of values to be %d but %d", 1, len(sa))
+ }
+
+ if sa[0] != val {
+ t.Fatalf("expected value to be %s but got: %s", sa[0], val)
+ }
+
+ getSA, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("got an error from GetStringArray():", err)
+ }
+
+ if len(getSA) != 1 {
+ t.Fatalf("expected number of values to be %d but %d", 1, len(getSA))
+ }
+
+ if getSA[0] != val {
+ t.Fatalf("expected value to be %s but got: %s", getSA[0], val)
+ }
+}
+
+func TestSACalledTwice(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSet(&sa)
+
+ in := []string{"one", "two"}
+ expected := []string{"one", "two"}
+ argfmt := "--sa=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(sa) {
+ t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa))
+ }
+ for i, v := range sa {
+ if expected[i] != v {
+ t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+
+ values, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(values) {
+ t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(sa))
+ }
+ for i, v := range values {
+ if expected[i] != v {
+ t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
+
+func TestSAWithSpecialChar(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSet(&sa)
+
+ in := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"}
+ expected := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"}
+ argfmt := "--sa=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ arg3 := fmt.Sprintf(argfmt, in[2])
+ arg4 := fmt.Sprintf(argfmt, in[3])
+ err := f.Parse([]string{arg1, arg2, arg3, arg4})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(sa) {
+ t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa))
+ }
+ for i, v := range sa {
+ if expected[i] != v {
+ t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+
+ values, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(values) {
+ t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+ }
+ for i, v := range values {
+ if expected[i] != v {
+ t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
+
+func TestSAWithSquareBrackets(t *testing.T) {
+ var sa []string
+ f := setUpSAFlagSet(&sa)
+
+ in := []string{"][]-[", "[a-z]", "[a-z]+"}
+ expected := []string{"][]-[", "[a-z]", "[a-z]+"}
+ argfmt := "--sa=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ arg3 := fmt.Sprintf(argfmt, in[2])
+ err := f.Parse([]string{arg1, arg2, arg3})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(sa) {
+ t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa))
+ }
+ for i, v := range sa {
+ if expected[i] != v {
+ t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+
+ values, err := f.GetStringArray("sa")
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(values) {
+ t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+ }
+ for i, v := range values {
+ if expected[i] != v {
+ t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice_test.go b/vendor/github.com/spf13/pflag/string_slice_test.go
new file mode 100644
index 0000000..c41f3bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice_test.go
@@ -0,0 +1,253 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func setUpSSFlagSet(ssp *[]string) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringSliceVar(ssp, "ss", []string{}, "Command separated list!")
+ return f
+}
+
+func setUpSSFlagSetWithDefault(ssp *[]string) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringSliceVar(ssp, "ss", []string{"default", "values"}, "Command separated list!")
+ return f
+}
+
+func TestEmptySS(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSet(&ss)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getSS, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("got an error from GetStringSlice():", err)
+ }
+ if len(getSS) != 0 {
+ t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS))
+ }
+}
+
+func TestEmptySSValue(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSet(&ss)
+ err := f.Parse([]string{"--ss="})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getSS, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("got an error from GetStringSlice():", err)
+ }
+ if len(getSS) != 0 {
+ t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS))
+ }
+}
+
+func TestSS(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSet(&ss)
+
+ vals := []string{"one", "two", "4", "3"}
+ arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ss {
+ if vals[i] != v {
+ t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getSS, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("got an error from GetStringSlice():", err)
+ }
+ for i, v := range getSS {
+ if vals[i] != v {
+ t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestSSDefault(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSetWithDefault(&ss)
+
+ vals := []string{"default", "values"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ss {
+ if vals[i] != v {
+ t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getSS, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("got an error from GetStringSlice():", err)
+ }
+ for i, v := range getSS {
+ if vals[i] != v {
+ t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestSSWithDefault(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSetWithDefault(&ss)
+
+ vals := []string{"one", "two", "4", "3"}
+ arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ss {
+ if vals[i] != v {
+ t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getSS, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("got an error from GetStringSlice():", err)
+ }
+ for i, v := range getSS {
+ if vals[i] != v {
+ t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestSSCalledTwice(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSet(&ss)
+
+ in := []string{"one,two", "three"}
+ expected := []string{"one", "two", "three"}
+ argfmt := "--ss=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(ss) {
+ t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss))
+ }
+ for i, v := range ss {
+ if expected[i] != v {
+ t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+
+ values, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(values) {
+ t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(ss))
+ }
+ for i, v := range values {
+ if expected[i] != v {
+ t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
+
+func TestSSWithComma(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSet(&ss)
+
+ in := []string{`"one,two"`, `"three"`, `"four,five",six`}
+ expected := []string{"one,two", "three", "four,five", "six"}
+ argfmt := "--ss=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ arg3 := fmt.Sprintf(argfmt, in[2])
+ err := f.Parse([]string{arg1, arg2, arg3})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(ss) {
+ t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss))
+ }
+ for i, v := range ss {
+ if expected[i] != v {
+ t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+
+ values, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(values) {
+ t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+ }
+ for i, v := range values {
+ if expected[i] != v {
+ t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
+
+func TestSSWithSquareBrackets(t *testing.T) {
+ var ss []string
+ f := setUpSSFlagSet(&ss)
+
+ in := []string{`"[a-z]"`, `"[a-z]+"`}
+ expected := []string{"[a-z]", "[a-z]+"}
+ argfmt := "--ss=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(ss) {
+ t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss))
+ }
+ for i, v := range ss {
+ if expected[i] != v {
+ t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+
+ values, err := f.GetStringSlice("ss")
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ if len(expected) != len(values) {
+ t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values))
+ }
+ for i, v := range values {
+ if expected[i] != v {
+ t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice_test.go b/vendor/github.com/spf13/pflag/uint_slice_test.go
new file mode 100644
index 0000000..db1a19d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice_test.go
@@ -0,0 +1,161 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func setUpUISFlagSet(uisp *[]uint) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.UintSliceVar(uisp, "uis", []uint{}, "Command separated list!")
+ return f
+}
+
+func setUpUISFlagSetWithDefault(uisp *[]uint) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.UintSliceVar(uisp, "uis", []uint{0, 1}, "Command separated list!")
+ return f
+}
+
+func TestEmptyUIS(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSet(&uis)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatal("got an error from GetUintSlice():", err)
+ }
+ if len(getUIS) != 0 {
+ t.Fatalf("got is %v with len=%d but expected length=0", getUIS, len(getUIS))
+ }
+}
+
+func TestUIS(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSet(&uis)
+
+ vals := []string{"1", "2", "4", "3"}
+ arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %s but got %d", i, vals[i], v)
+ }
+ }
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for i, v := range getUIS {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %s but got: %d from GetUintSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestUISDefault(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSetWithDefault(&uis)
+
+ vals := []string{"0", "1"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expect uis[%d] to be %d but got: %d", i, u, v)
+ }
+ }
+
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatal("got an error from GetUintSlice():", err)
+ }
+ for i, v := range getUIS {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatal("got an error from GetIntSlice():", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+ }
+ }
+}
+
+func TestUISWithDefault(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSetWithDefault(&uis)
+
+ vals := []string{"1", "2"}
+ arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+ }
+ }
+
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatal("got an error from GetUintSlice():", err)
+ }
+ for i, v := range getUIS {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+ }
+ }
+}
+
+func TestUISCalledTwice(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSet(&uis)
+
+ in := []string{"1,2", "3"}
+ expected := []int{1, 2, 3}
+ argfmt := "--uis=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ if uint(expected[i]) != v {
+ t.Fatalf("expected uis[%d] to be %d but got: %d", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
new file mode 100644
index 0000000..d9b77c1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
@@ -0,0 +1,358 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris
+
+package terminal
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "runtime"
+ "testing"
+)
+
+type MockTerminal struct {
+ toSend []byte
+ bytesPerRead int
+ received []byte
+}
+
+func (c *MockTerminal) Read(data []byte) (n int, err error) {
+ n = len(data)
+ if n == 0 {
+ return
+ }
+ if n > len(c.toSend) {
+ n = len(c.toSend)
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+ if c.bytesPerRead > 0 && n > c.bytesPerRead {
+ n = c.bytesPerRead
+ }
+ copy(data, c.toSend[:n])
+ c.toSend = c.toSend[n:]
+ return
+}
+
+func (c *MockTerminal) Write(data []byte) (n int, err error) {
+ c.received = append(c.received, data...)
+ return len(data), nil
+}
+
+func TestClose(t *testing.T) {
+ c := &MockTerminal{}
+ ss := NewTerminal(c, "> ")
+ line, err := ss.ReadLine()
+ if line != "" {
+ t.Errorf("Expected empty line but got: %s", line)
+ }
+ if err != io.EOF {
+ t.Errorf("Error should have been EOF but got: %s", err)
+ }
+}
+
+var keyPressTests = []struct {
+ in string
+ line string
+ err error
+ throwAwayLines int
+}{
+ {
+ err: io.EOF,
+ },
+ {
+ in: "\r",
+ line: "",
+ },
+ {
+ in: "foo\r",
+ line: "foo",
+ },
+ {
+ in: "a\x1b[Cb\r", // right
+ line: "ab",
+ },
+ {
+ in: "a\x1b[Db\r", // left
+ line: "ba",
+ },
+ {
+ in: "a\177b\r", // backspace
+ line: "b",
+ },
+ {
+ in: "\x1b[A\r", // up
+ },
+ {
+ in: "\x1b[B\r", // down
+ },
+ {
+ in: "line\x1b[A\x1b[B\r", // up then down
+ line: "line",
+ },
+ {
+ in: "line1\rline2\x1b[A\r", // recall previous line.
+ line: "line1",
+ throwAwayLines: 1,
+ },
+ {
+ // recall two previous lines and append.
+ in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r",
+ line: "line1xxx",
+ throwAwayLines: 2,
+ },
+ {
+ // Ctrl-A to move to beginning of line followed by ^K to kill
+ // line.
+ in: "a b \001\013\r",
+ line: "",
+ },
+ {
+ // Ctrl-A to move to beginning of line, Ctrl-E to move to end,
+ // finally ^K to kill nothing.
+ in: "a b \001\005\013\r",
+ line: "a b ",
+ },
+ {
+ in: "\027\r",
+ line: "",
+ },
+ {
+ in: "a\027\r",
+ line: "",
+ },
+ {
+ in: "a \027\r",
+ line: "",
+ },
+ {
+ in: "a b\027\r",
+ line: "a ",
+ },
+ {
+ in: "a b \027\r",
+ line: "a ",
+ },
+ {
+ in: "one two thr\x1b[D\027\r",
+ line: "one two r",
+ },
+ {
+ in: "\013\r",
+ line: "",
+ },
+ {
+ in: "a\013\r",
+ line: "a",
+ },
+ {
+ in: "ab\x1b[D\013\r",
+ line: "a",
+ },
+ {
+ in: "Ξεσκεπάζω\r",
+ line: "Ξεσκεπάζω",
+ },
+ {
+ in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace.
+ line: "",
+ throwAwayLines: 1,
+ },
+ {
+ in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter.
+ line: "£",
+ throwAwayLines: 1,
+ },
+ {
+ // Ctrl-D at the end of the line should be ignored.
+ in: "a\004\r",
+ line: "a",
+ },
+ {
+ // a, b, left, Ctrl-D should erase the b.
+ in: "ab\x1b[D\004\r",
+ line: "a",
+ },
+ {
+ // a, b, c, d, left, left, ^U should erase to the beginning of
+ // the line.
+ in: "abcd\x1b[D\x1b[D\025\r",
+ line: "cd",
+ },
+ {
+ // Bracketed paste mode: control sequences should be returned
+ // verbatim in paste mode.
+ in: "abc\x1b[200~de\177f\x1b[201~\177\r",
+ line: "abcde\177",
+ },
+ {
+ // Enter in bracketed paste mode should still work.
+ in: "abc\x1b[200~d\refg\x1b[201~h\r",
+ line: "efgh",
+ throwAwayLines: 1,
+ },
+ {
+ // Lines consisting entirely of pasted data should be indicated as such.
+ in: "\x1b[200~a\r",
+ line: "a",
+ err: ErrPasteIndicator,
+ },
+}
+
+func TestKeyPresses(t *testing.T) {
+ for i, test := range keyPressTests {
+ for j := 1; j < len(test.in); j++ {
+ c := &MockTerminal{
+ toSend: []byte(test.in),
+ bytesPerRead: j,
+ }
+ ss := NewTerminal(c, "> ")
+ for k := 0; k < test.throwAwayLines; k++ {
+ _, err := ss.ReadLine()
+ if err != nil {
+ t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err)
+ }
+ }
+ line, err := ss.ReadLine()
+ if line != test.line {
+ t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line)
+ break
+ }
+ if err != test.err {
+ t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err)
+ break
+ }
+ }
+ }
+}
+
+func TestPasswordNotSaved(t *testing.T) {
+ c := &MockTerminal{
+ toSend: []byte("password\r\x1b[A\r"),
+ bytesPerRead: 1,
+ }
+ ss := NewTerminal(c, "> ")
+ pw, _ := ss.ReadPassword("> ")
+ if pw != "password" {
+ t.Fatalf("failed to read password, got %s", pw)
+ }
+ line, _ := ss.ReadLine()
+ if len(line) > 0 {
+ t.Fatalf("password was saved in history")
+ }
+}
+
+var setSizeTests = []struct {
+ width, height int
+}{
+ {40, 13},
+ {80, 24},
+ {132, 43},
+}
+
+func TestTerminalSetSize(t *testing.T) {
+ for _, setSize := range setSizeTests {
+ c := &MockTerminal{
+ toSend: []byte("password\r\x1b[A\r"),
+ bytesPerRead: 1,
+ }
+ ss := NewTerminal(c, "> ")
+ ss.SetSize(setSize.width, setSize.height)
+ pw, _ := ss.ReadPassword("Password: ")
+ if pw != "password" {
+ t.Fatalf("failed to read password, got %s", pw)
+ }
+ if string(c.received) != "Password: \r\n" {
+ t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received)
+ }
+ }
+}
+
+func TestReadPasswordLineEnd(t *testing.T) {
+ var tests = []struct {
+ input string
+ want string
+ }{
+ {"\n", ""},
+ {"\r\n", ""},
+ {"test\r\n", "test"},
+ {"testtesttesttes\n", "testtesttesttes"},
+ {"testtesttesttes\r\n", "testtesttesttes"},
+ {"testtesttesttesttest\n", "testtesttesttesttest"},
+ {"testtesttesttesttest\r\n", "testtesttesttesttest"},
+ }
+ for _, test := range tests {
+ buf := new(bytes.Buffer)
+ if _, err := buf.WriteString(test.input); err != nil {
+ t.Fatal(err)
+ }
+
+ have, err := readPasswordLine(buf)
+ if err != nil {
+ t.Errorf("readPasswordLine(%q) failed: %v", test.input, err)
+ continue
+ }
+ if string(have) != test.want {
+ t.Errorf("readPasswordLine(%q) returns %q, but %q is expected", test.input, string(have), test.want)
+ continue
+ }
+
+ if _, err = buf.WriteString(test.input); err != nil {
+ t.Fatal(err)
+ }
+ have, err = readPasswordLine(buf)
+ if err != nil {
+ t.Errorf("readPasswordLine(%q) failed: %v", test.input, err)
+ continue
+ }
+ if string(have) != test.want {
+ t.Errorf("readPasswordLine(%q) returns %q, but %q is expected", test.input, string(have), test.want)
+ continue
+ }
+ }
+}
+
+func TestMakeRawState(t *testing.T) {
+ fd := int(os.Stdout.Fd())
+ if !IsTerminal(fd) {
+ t.Skip("stdout is not a terminal; skipping test")
+ }
+
+ st, err := GetState(fd)
+ if err != nil {
+ t.Fatalf("failed to get terminal state from GetState: %s", err)
+ }
+
+ if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
+ t.Skip("MakeRaw not allowed on iOS; skipping test")
+ }
+
+ defer Restore(fd, st)
+ raw, err := MakeRaw(fd)
+ if err != nil {
+ t.Fatalf("failed to get terminal state from MakeRaw: %s", err)
+ }
+
+ if *st != *raw {
+ t.Errorf("states do not match; was %v, expected %v", raw, st)
+ }
+}
+
+func TestOutputNewlines(t *testing.T) {
+ // \n should be changed to \r\n in terminal output.
+ buf := new(bytes.Buffer)
+ term := NewTerminal(buf, ">")
+
+ term.Write([]byte("1\n2\n"))
+ output := string(buf.Bytes())
+ const expected = "1\r\n2\r\n"
+
+ if output != expected {
+ t.Errorf("incorrect output: was %q, expected %q", output, expected)
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/creds_test.go b/vendor/golang.org/x/sys/unix/creds_test.go
new file mode 100644
index 0000000..1b50831
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/creds_test.go
@@ -0,0 +1,134 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package unix_test
+
+import (
+ "bytes"
+ "go/build"
+ "net"
+ "os"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+// TestSCMCredentials tests the sending and receiving of credentials
+// (PID, UID, GID) in an ancillary message between two UNIX
+// sockets. The SO_PASSCRED socket option is enabled on the sending
+// socket for this to work.
+func TestSCMCredentials(t *testing.T) {
+ socketTypeTests := []struct {
+ socketType int
+ dataLen int
+ }{
+ {
+ unix.SOCK_STREAM,
+ 1,
+ }, {
+ unix.SOCK_DGRAM,
+ 0,
+ },
+ }
+
+ for _, tt := range socketTypeTests {
+ if tt.socketType == unix.SOCK_DGRAM && !atLeast1p10() {
+ t.Log("skipping DGRAM test on pre-1.10")
+ continue
+ }
+
+ fds, err := unix.Socketpair(unix.AF_LOCAL, tt.socketType, 0)
+ if err != nil {
+ t.Fatalf("Socketpair: %v", err)
+ }
+ defer unix.Close(fds[0])
+ defer unix.Close(fds[1])
+
+ err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1)
+ if err != nil {
+ t.Fatalf("SetsockoptInt: %v", err)
+ }
+
+ srvFile := os.NewFile(uintptr(fds[0]), "server")
+ defer srvFile.Close()
+ srv, err := net.FileConn(srvFile)
+ if err != nil {
+ t.Errorf("FileConn: %v", err)
+ return
+ }
+ defer srv.Close()
+
+ cliFile := os.NewFile(uintptr(fds[1]), "client")
+ defer cliFile.Close()
+ cli, err := net.FileConn(cliFile)
+ if err != nil {
+ t.Errorf("FileConn: %v", err)
+ return
+ }
+ defer cli.Close()
+
+ var ucred unix.Ucred
+ ucred.Pid = int32(os.Getpid())
+ ucred.Uid = uint32(os.Getuid())
+ ucred.Gid = uint32(os.Getgid())
+ oob := unix.UnixCredentials(&ucred)
+
+ // On SOCK_STREAM, this is internally going to send a dummy byte
+ n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
+ if err != nil {
+ t.Fatalf("WriteMsgUnix: %v", err)
+ }
+ if n != 0 {
+ t.Fatalf("WriteMsgUnix n = %d, want 0", n)
+ }
+ if oobn != len(oob) {
+ t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob))
+ }
+
+ oob2 := make([]byte, 10*len(oob))
+ n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2)
+ if err != nil {
+ t.Fatalf("ReadMsgUnix: %v", err)
+ }
+ if flags != 0 {
+ t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags)
+ }
+ if n != tt.dataLen {
+ t.Fatalf("ReadMsgUnix n = %d, want %d", n, tt.dataLen)
+ }
+ if oobn2 != oobn {
+ // without SO_PASSCRED set on the socket, ReadMsgUnix will
+ // return zero oob bytes
+ t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn)
+ }
+ oob2 = oob2[:oobn2]
+ if !bytes.Equal(oob, oob2) {
+ t.Fatal("ReadMsgUnix oob bytes don't match")
+ }
+
+ scm, err := unix.ParseSocketControlMessage(oob2)
+ if err != nil {
+ t.Fatalf("ParseSocketControlMessage: %v", err)
+ }
+ newUcred, err := unix.ParseUnixCredentials(&scm[0])
+ if err != nil {
+ t.Fatalf("ParseUnixCredentials: %v", err)
+ }
+ if *newUcred != ucred {
+ t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred)
+ }
+ }
+}
+
+// atLeast1p10 reports whether we are running on Go 1.10 or later.
+func atLeast1p10() bool {
+ for _, ver := range build.Default.ReleaseTags {
+ if ver == "go1.10" {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux_test.go b/vendor/golang.org/x/sys/unix/dev_linux_test.go
new file mode 100644
index 0000000..5164528
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux_test.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // well known major/minor numbers according to
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/admin-guide/devices.txt
+ {"/dev/null", 1, 3},
+ {"/dev/zero", 1, 5},
+ {"/dev/random", 1, 8},
+ {"/dev/full", 1, 7},
+ {"/dev/urandom", 1, 9},
+ {"/dev/tty", 5, 0},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ if err == unix.EACCES {
+ t.Skip("no permission to stat device, skipping test")
+ }
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/example_test.go b/vendor/golang.org/x/sys/unix/example_test.go
new file mode 100644
index 0000000..10619af
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/example_test.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "log"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func ExampleExec() {
+ err := unix.Exec("/bin/ls", []string{"ls", "-al"}, os.Environ())
+ log.Fatal(err)
+}
diff --git a/vendor/golang.org/x/sys/unix/export_test.go b/vendor/golang.org/x/sys/unix/export_test.go
new file mode 100644
index 0000000..e802469
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/export_test.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+var Itoa = itoa
diff --git a/vendor/golang.org/x/sys/unix/mmap_unix_test.go b/vendor/golang.org/x/sys/unix/mmap_unix_test.go
new file mode 100644
index 0000000..3258ca3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mmap_unix_test.go
@@ -0,0 +1,35 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestMmap(t *testing.T) {
+ b, err := unix.Mmap(-1, 0, unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("Mmap: %v", err)
+ }
+ if err := unix.Mprotect(b, unix.PROT_READ|unix.PROT_WRITE); err != nil {
+ t.Fatalf("Mprotect: %v", err)
+ }
+
+ b[0] = 42
+
+ if err := unix.Msync(b, unix.MS_SYNC); err != nil {
+ t.Fatalf("Msync: %v", err)
+ }
+ if err := unix.Madvise(b, unix.MADV_DONTNEED); err != nil {
+ t.Fatalf("Madvise: %v", err)
+ }
+ if err := unix.Munmap(b); err != nil {
+ t.Fatalf("Munmap: %v", err)
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/openbsd_test.go b/vendor/golang.org/x/sys/unix/openbsd_test.go
new file mode 100644
index 0000000..734d765
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/openbsd_test.go
@@ -0,0 +1,113 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd
+
+// This, on the face of it, bizarre testing mechanism is necessary because
+// the only reliable way to gauge whether or not a pledge(2) call has succeeded
+// is that the program has been killed as a result of breaking its pledge.
+
+package unix_test
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+type testProc struct {
+ fn func() // should always exit instead of returning
+ cleanup func() error // for instance, delete coredumps from testing pledge
+ success bool // whether zero-exit means success or failure
+}
+
+var (
+ testProcs = map[string]testProc{}
+ procName = ""
+)
+
+const (
+ optName = "sys-unix-internal-procname"
+)
+
+func init() {
+ flag.StringVar(&procName, optName, "", "internal use only")
+}
+
+// testCmd generates a proper command that, when executed, runs the test
+// corresponding to the given key.
+func testCmd(procName string) (*exec.Cmd, error) {
+ exe, err := filepath.Abs(os.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ cmd := exec.Command(exe, "-"+optName+"="+procName)
+ cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+ return cmd, nil
+}
+
+// ExitsCorrectly is a comprehensive, one-line-of-use wrapper for testing
+// a testProc with a key.
+func ExitsCorrectly(procName string, t *testing.T) {
+ s := testProcs[procName]
+ c, err := testCmd(procName)
+ defer func() {
+ if s.cleanup() != nil {
+ t.Fatalf("Failed to run cleanup for %s", procName)
+ }
+ }()
+ if err != nil {
+ t.Fatalf("Failed to construct command for %s", procName)
+ }
+ if (c.Run() == nil) != s.success {
+ result := "succeed"
+ if !s.success {
+ result = "fail"
+ }
+ t.Fatalf("Process did not %s when it was supposed to", result)
+ }
+}
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if procName != "" {
+ testProcs[procName].fn()
+ }
+ os.Exit(m.Run())
+}
+
+// For example, add a test for pledge.
+func init() {
+ testProcs["pledge"] = testProc{
+ func() {
+ fmt.Println(unix.Pledge("", nil))
+ os.Exit(0)
+ },
+ func() error {
+ files, err := ioutil.ReadDir(".")
+ if err != nil {
+ return err
+ }
+ for _, file := range files {
+ if filepath.Ext(file.Name()) == ".core" {
+ if err := os.Remove(file.Name()); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ },
+ false,
+ }
+}
+
+func TestPledge(t *testing.T) {
+ ExitsCorrectly("pledge", t)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go
new file mode 100644
index 0000000..6c4e2ac
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go
@@ -0,0 +1,93 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd openbsd
+
+package unix_test
+
+import (
+ "os/exec"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+const MNT_WAIT = 1
+const MNT_NOWAIT = 2
+
+func TestGetfsstat(t *testing.T) {
+ const flags = MNT_NOWAIT // see golang.org/issue/16937
+ n, err := unix.Getfsstat(nil, flags)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := make([]unix.Statfs_t, n)
+ n2, err := unix.Getfsstat(data, flags)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != n2 {
+ t.Errorf("Getfsstat(nil) = %d, but subsequent Getfsstat(slice) = %d", n, n2)
+ }
+ for i, stat := range data {
+ if stat == (unix.Statfs_t{}) {
+ t.Errorf("index %v is an empty Statfs_t struct", i)
+ }
+ }
+ if t.Failed() {
+ for i, stat := range data[:n2] {
+ t.Logf("data[%v] = %+v", i, stat)
+ }
+ mount, err := exec.Command("mount").CombinedOutput()
+ if err != nil {
+ t.Logf("mount: %v\n%s", err, mount)
+ } else {
+ t.Logf("mount: %s", mount)
+ }
+ }
+}
+
+func TestSelect(t *testing.T) {
+ err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0})
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ dur := 250 * time.Millisecond
+ tv := unix.NsecToTimeval(int64(dur))
+ start := time.Now()
+ err = unix.Select(0, nil, nil, nil, &tv)
+ took := time.Since(start)
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ // On some BSDs the actual timeout might also be slightly less than the requested.
+ // Add an acceptable margin to avoid flaky tests.
+ if took < dur*2/3 {
+ t.Errorf("Select: timeout should have been at least %v, got %v", dur, took)
+ }
+}
+
+func TestSysctlRaw(t *testing.T) {
+ if runtime.GOOS == "openbsd" {
+ t.Skip("kern.proc.pid does not exist on OpenBSD")
+ }
+
+ _, err := unix.SysctlRaw("kern.proc.pid", unix.Getpid())
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSysctlUint32(t *testing.T) {
+ maxproc, err := unix.SysctlUint32("kern.maxproc")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("kern.maxproc: %v", maxproc)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_test.go b/vendor/golang.org/x/sys/unix/syscall_darwin_test.go
new file mode 100644
index 0000000..65691d5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_test.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix_test
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On Darwin, each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) []string {
+ var result []string
+ off := 0
+ for i, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[off:i]))
+ off = i + 1
+ }
+ }
+ return result
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go
new file mode 100644
index 0000000..0fec1a8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go
@@ -0,0 +1,312 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd
+
+package unix_test
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestSysctlUint64(t *testing.T) {
+ _, err := unix.SysctlUint64("vm.swap_total")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// FIXME: Infrastructure for launching tests in subprocesses stolen from openbsd_test.go - refactor?
+// testCmd generates a proper command that, when executed, runs the test
+// corresponding to the given key.
+
+type testProc struct {
+ fn func() // should always exit instead of returning
+ arg func(t *testing.T) string // generate argument for test
+ cleanup func(arg string) error // for instance, delete coredumps from testing pledge
+ success bool // whether zero-exit means success or failure
+}
+
+var (
+ testProcs = map[string]testProc{}
+ procName = ""
+ procArg = ""
+)
+
+const (
+ optName = "sys-unix-internal-procname"
+ optArg = "sys-unix-internal-arg"
+)
+
+func init() {
+ flag.StringVar(&procName, optName, "", "internal use only")
+ flag.StringVar(&procArg, optArg, "", "internal use only")
+
+}
+
+func testCmd(procName string, procArg string) (*exec.Cmd, error) {
+ exe, err := filepath.Abs(os.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ cmd := exec.Command(exe, "-"+optName+"="+procName, "-"+optArg+"="+procArg)
+ cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+ return cmd, nil
+}
+
+// ExitsCorrectly is a comprehensive, one-line-of-use wrapper for testing
+// a testProc with a key.
+func ExitsCorrectly(t *testing.T, procName string) {
+ s := testProcs[procName]
+ arg := "-"
+ if s.arg != nil {
+ arg = s.arg(t)
+ }
+ c, err := testCmd(procName, arg)
+ defer func(arg string) {
+ if err := s.cleanup(arg); err != nil {
+ t.Fatalf("Failed to run cleanup for %s %s %#v", procName, err, err)
+ }
+ }(arg)
+ if err != nil {
+ t.Fatalf("Failed to construct command for %s", procName)
+ }
+ if (c.Run() == nil) != s.success {
+ result := "succeed"
+ if !s.success {
+ result = "fail"
+ }
+ t.Fatalf("Process did not %s when it was supposed to", result)
+ }
+}
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ if procName != "" {
+ t := testProcs[procName]
+ t.fn()
+ os.Stderr.WriteString("test function did not exit\n")
+ if t.success {
+ os.Exit(1)
+ } else {
+ os.Exit(0)
+ }
+ }
+ os.Exit(m.Run())
+}
+
+// end of infrastructure
+
+const testfile = "gocapmodetest"
+const testfile2 = testfile + "2"
+
+func CapEnterTest() {
+ _, err := os.OpenFile(path.Join(procArg, testfile), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ panic(fmt.Sprintf("OpenFile: %s", err))
+ }
+
+ err = unix.CapEnter()
+ if err != nil {
+ panic(fmt.Sprintf("CapEnter: %s", err))
+ }
+
+ _, err = os.OpenFile(path.Join(procArg, testfile2), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err == nil {
+ panic("OpenFile works!")
+ }
+ if err.(*os.PathError).Err != unix.ECAPMODE {
+ panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err))
+ }
+ os.Exit(0)
+}
+
+func makeTempDir(t *testing.T) string {
+ d, err := ioutil.TempDir("", "go_openat_test")
+ if err != nil {
+ t.Fatalf("TempDir failed: %s", err)
+ }
+ return d
+}
+
+func removeTempDir(arg string) error {
+ err := os.RemoveAll(arg)
+ if err != nil && err.(*os.PathError).Err == unix.ENOENT {
+ return nil
+ }
+ return err
+}
+
+func init() {
+ testProcs["cap_enter"] = testProc{
+ CapEnterTest,
+ makeTempDir,
+ removeTempDir,
+ true,
+ }
+}
+
+func TestCapEnter(t *testing.T) {
+ if runtime.GOARCH != "amd64" {
+ t.Skipf("skipping test on %s", runtime.GOARCH)
+ }
+ ExitsCorrectly(t, "cap_enter")
+}
+
+func OpenatTest() {
+ f, err := os.Open(procArg)
+ if err != nil {
+ panic(err)
+ }
+
+ err = unix.CapEnter()
+ if err != nil {
+ panic(fmt.Sprintf("CapEnter: %s", err))
+ }
+
+ fxx, err := unix.Openat(int(f.Fd()), "xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ panic(err)
+ }
+ unix.Close(fxx)
+
+ // The right to open BASE/xx is not ambient
+ _, err = os.OpenFile(procArg+"/xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err == nil {
+ panic("OpenFile succeeded")
+ }
+ if err.(*os.PathError).Err != unix.ECAPMODE {
+ panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err))
+ }
+
+ // Can't make a new directory either
+ err = os.Mkdir(procArg+"2", 0777)
+ if err == nil {
+ panic("MKdir succeeded")
+ }
+ if err.(*os.PathError).Err != unix.ECAPMODE {
+ panic(fmt.Sprintf("Mkdir failed wrong: %s %#v", err, err))
+ }
+
+ // Remove all caps except read and lookup.
+ r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_LOOKUP})
+ if err != nil {
+ panic(fmt.Sprintf("CapRightsInit failed: %s %#v", err, err))
+ }
+ err = unix.CapRightsLimit(f.Fd(), r)
+ if err != nil {
+ panic(fmt.Sprintf("CapRightsLimit failed: %s %#v", err, err))
+ }
+
+ // Check we can get the rights back again
+ r, err = unix.CapRightsGet(f.Fd())
+ if err != nil {
+ panic(fmt.Sprintf("CapRightsGet failed: %s %#v", err, err))
+ }
+ b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP})
+ if err != nil {
+ panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err))
+ }
+ if !b {
+ panic(fmt.Sprintf("Unexpected rights"))
+ }
+ b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP, unix.CAP_WRITE})
+ if err != nil {
+ panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err))
+ }
+ if b {
+ panic(fmt.Sprintf("Unexpected rights (2)"))
+ }
+
+ // Can no longer create a file
+ _, err = unix.Openat(int(f.Fd()), "xx2", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err == nil {
+ panic("Openat succeeded")
+ }
+ if err != unix.ENOTCAPABLE {
+ panic(fmt.Sprintf("OpenFileAt failed wrong: %s %#v", err, err))
+ }
+
+ // But can read an existing one
+ _, err = unix.Openat(int(f.Fd()), "xx", os.O_RDONLY, 0666)
+ if err != nil {
+ panic(fmt.Sprintf("Openat failed: %s %#v", err, err))
+ }
+
+ os.Exit(0)
+}
+
+func init() {
+ testProcs["openat"] = testProc{
+ OpenatTest,
+ makeTempDir,
+ removeTempDir,
+ true,
+ }
+}
+
+func TestOpenat(t *testing.T) {
+ if runtime.GOARCH != "amd64" {
+ t.Skipf("skipping test on %s", runtime.GOARCH)
+ }
+ ExitsCorrectly(t, "openat")
+}
+
+func TestCapRightsSetAndClear(t *testing.T) {
+ r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT})
+ if err != nil {
+ t.Fatalf("CapRightsInit failed: %s", err)
+ }
+
+ err = unix.CapRightsSet(r, []uint64{unix.CAP_EVENT, unix.CAP_LISTEN})
+ if err != nil {
+ t.Fatalf("CapRightsSet failed: %s", err)
+ }
+
+ b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT, unix.CAP_EVENT, unix.CAP_LISTEN})
+ if err != nil {
+ t.Fatalf("CapRightsIsSet failed: %s", err)
+ }
+ if !b {
+ t.Fatalf("Wrong rights set")
+ }
+
+ err = unix.CapRightsClear(r, []uint64{unix.CAP_READ, unix.CAP_PDWAIT})
+ if err != nil {
+ t.Fatalf("CapRightsClear failed: %s", err)
+ }
+
+ b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_WRITE, unix.CAP_EVENT, unix.CAP_LISTEN})
+ if err != nil {
+ t.Fatalf("CapRightsIsSet failed: %s", err)
+ }
+ if !b {
+ t.Fatalf("Wrong rights set")
+ }
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On FreeBSD, each entry consists of a single byte containing the length
+// of the attribute name, followed by the attribute name.
+// The name is _not_ NULL-terminated.
+func stringsFromByteSlice(buf []byte) []string {
+ var result []string
+ i := 0
+ for i < len(buf) {
+ next := i + 1 + int(buf[i])
+ result = append(result, string(buf[i+1:next]))
+ i = next
+ }
+ return result
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go
new file mode 100644
index 0000000..7fd5e2a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go
@@ -0,0 +1,386 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package unix_test
+
+import (
+ "os"
+ "runtime"
+ "runtime/debug"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestIoctlGetInt(t *testing.T) {
+ f, err := os.Open("/dev/random")
+ if err != nil {
+ t.Fatalf("failed to open device: %v", err)
+ }
+ defer f.Close()
+
+ v, err := unix.IoctlGetInt(int(f.Fd()), unix.RNDGETENTCNT)
+ if err != nil {
+ t.Fatalf("failed to perform ioctl: %v", err)
+ }
+
+ t.Logf("%d bits of entropy available", v)
+}
+
+func TestPpoll(t *testing.T) {
+ if runtime.GOOS == "android" {
+ t.Skip("mkfifo syscall is not available on android, skipping test")
+ }
+
+ f, cleanup := mktmpfifo(t)
+ defer cleanup()
+
+ const timeout = 100 * time.Millisecond
+
+ ok := make(chan bool, 1)
+ go func() {
+ select {
+ case <-time.After(10 * timeout):
+ t.Errorf("Ppoll: failed to timeout after %d", 10*timeout)
+ case <-ok:
+ }
+ }()
+
+ fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}}
+ timeoutTs := unix.NsecToTimespec(int64(timeout))
+ n, err := unix.Ppoll(fds, &timeoutTs, nil)
+ ok <- true
+ if err != nil {
+ t.Errorf("Ppoll: unexpected error: %v", err)
+ return
+ }
+ if n != 0 {
+ t.Errorf("Ppoll: wrong number of events: got %v, expected %v", n, 0)
+ return
+ }
+}
+
+func TestTime(t *testing.T) {
+ var ut unix.Time_t
+ ut2, err := unix.Time(&ut)
+ if err != nil {
+ t.Fatalf("Time: %v", err)
+ }
+ if ut != ut2 {
+ t.Errorf("Time: return value %v should be equal to argument %v", ut2, ut)
+ }
+
+ var now time.Time
+
+ for i := 0; i < 10; i++ {
+ ut, err = unix.Time(nil)
+ if err != nil {
+ t.Fatalf("Time: %v", err)
+ }
+
+ now = time.Now()
+
+ if int64(ut) == now.Unix() {
+ return
+ }
+ }
+
+ t.Errorf("Time: return value %v should be nearly equal to time.Now().Unix() %v", ut, now.Unix())
+}
+
+func TestUtime(t *testing.T) {
+ defer chtmpdir(t)()
+
+ touch(t, "file1")
+
+ buf := &unix.Utimbuf{
+ Modtime: 12345,
+ }
+
+ err := unix.Utime("file1", buf)
+ if err != nil {
+ t.Fatalf("Utime: %v", err)
+ }
+
+ fi, err := os.Stat("file1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if fi.ModTime().Unix() != 12345 {
+ t.Errorf("Utime: failed to change modtime: expected %v, got %v", 12345, fi.ModTime().Unix())
+ }
+}
+
+func TestUtimesNanoAt(t *testing.T) {
+ defer chtmpdir(t)()
+
+ symlink := "symlink1"
+ os.Remove(symlink)
+ err := os.Symlink("nonexisting", symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []unix.Timespec{
+ {Sec: 1111, Nsec: 2222},
+ {Sec: 3333, Nsec: 4444},
+ }
+ err = unix.UtimesNanoAt(unix.AT_FDCWD, symlink, ts, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ t.Fatalf("UtimesNanoAt: %v", err)
+ }
+
+ var st unix.Stat_t
+ err = unix.Lstat(symlink, &st)
+ if err != nil {
+ t.Fatalf("Lstat: %v", err)
+ }
+ if st.Atim != ts[0] {
+ t.Errorf("UtimesNanoAt: wrong atime: %v", st.Atim)
+ }
+ if st.Mtim != ts[1] {
+ t.Errorf("UtimesNanoAt: wrong mtime: %v", st.Mtim)
+ }
+}
+
+func TestRlimitAs(t *testing.T) {
+	// disable GC during the test to avoid flakiness
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+ var rlim unix.Rlimit
+ err := unix.Getrlimit(unix.RLIMIT_AS, &rlim)
+ if err != nil {
+ t.Fatalf("Getrlimit: %v", err)
+ }
+ var zero unix.Rlimit
+ if zero == rlim {
+ t.Fatalf("Getrlimit: got zero value %#v", rlim)
+ }
+ set := rlim
+ set.Cur = uint64(unix.Getpagesize())
+ err = unix.Setrlimit(unix.RLIMIT_AS, &set)
+ if err != nil {
+ t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
+ }
+
+ // RLIMIT_AS was set to the page size, so mmap()'ing twice the page size
+ // should fail. See 'man 2 getrlimit'.
+ _, err = unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
+ if err == nil {
+ t.Fatal("Mmap: unexpectedly suceeded after setting RLIMIT_AS")
+ }
+
+ err = unix.Setrlimit(unix.RLIMIT_AS, &rlim)
+ if err != nil {
+ t.Fatalf("Setrlimit: restore failed: %#v %v", rlim, err)
+ }
+
+ b, err := unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("Mmap: %v", err)
+ }
+ err = unix.Munmap(b)
+ if err != nil {
+ t.Fatalf("Munmap: %v", err)
+ }
+}
+
+func TestSelect(t *testing.T) {
+ _, err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0})
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ dur := 150 * time.Millisecond
+ tv := unix.NsecToTimeval(int64(dur))
+ start := time.Now()
+ _, err = unix.Select(0, nil, nil, nil, &tv)
+ took := time.Since(start)
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ if took < dur {
+ t.Errorf("Select: timeout should have been at least %v, got %v", dur, took)
+ }
+}
+
+func TestPselect(t *testing.T) {
+ _, err := unix.Pselect(0, nil, nil, nil, &unix.Timespec{Sec: 0, Nsec: 0}, nil)
+ if err != nil {
+ t.Fatalf("Pselect: %v", err)
+ }
+
+ dur := 2500 * time.Microsecond
+ ts := unix.NsecToTimespec(int64(dur))
+ start := time.Now()
+ _, err = unix.Pselect(0, nil, nil, nil, &ts, nil)
+ took := time.Since(start)
+ if err != nil {
+ t.Fatalf("Pselect: %v", err)
+ }
+
+ if took < dur {
+ t.Errorf("Pselect: timeout should have been at least %v, got %v", dur, took)
+ }
+}
+
+func TestSchedSetaffinity(t *testing.T) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ var oldMask unix.CPUSet
+ err := unix.SchedGetaffinity(0, &oldMask)
+ if err != nil {
+ t.Fatalf("SchedGetaffinity: %v", err)
+ }
+
+ var newMask unix.CPUSet
+ newMask.Zero()
+ if newMask.Count() != 0 {
+ t.Errorf("CpuZero: didn't zero CPU set: %v", newMask)
+ }
+ cpu := 1
+ newMask.Set(cpu)
+ if newMask.Count() != 1 || !newMask.IsSet(cpu) {
+ t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask)
+ }
+ cpu = 5
+ newMask.Set(cpu)
+ if newMask.Count() != 2 || !newMask.IsSet(cpu) {
+ t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask)
+ }
+ newMask.Clear(cpu)
+ if newMask.Count() != 1 || newMask.IsSet(cpu) {
+ t.Errorf("CpuClr: didn't clear CPU %d in set: %v", cpu, newMask)
+ }
+
+ if runtime.NumCPU() < 2 {
+ t.Skip("skipping setaffinity tests on single CPU system")
+ }
+ if runtime.GOOS == "android" {
+ t.Skip("skipping setaffinity tests on android")
+ }
+
+ err = unix.SchedSetaffinity(0, &newMask)
+ if err != nil {
+ t.Fatalf("SchedSetaffinity: %v", err)
+ }
+
+ var gotMask unix.CPUSet
+ err = unix.SchedGetaffinity(0, &gotMask)
+ if err != nil {
+ t.Fatalf("SchedGetaffinity: %v", err)
+ }
+
+ if gotMask != newMask {
+ t.Errorf("SchedSetaffinity: returned affinity mask does not match set affinity mask")
+ }
+
+ // Restore old mask so it doesn't affect successive tests
+ err = unix.SchedSetaffinity(0, &oldMask)
+ if err != nil {
+ t.Fatalf("SchedSetaffinity: %v", err)
+ }
+}
+
+func TestStatx(t *testing.T) {
+ var stx unix.Statx_t
+ err := unix.Statx(unix.AT_FDCWD, ".", 0, 0, &stx)
+ if err == unix.ENOSYS || err == unix.EPERM {
+ t.Skip("statx syscall is not available, skipping test")
+ } else if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+ defer chtmpdir(t)()
+ touch(t, "file1")
+
+ var st unix.Stat_t
+ err = unix.Stat("file1", &st)
+ if err != nil {
+ t.Fatalf("Stat: %v", err)
+ }
+
+ flags := unix.AT_STATX_SYNC_AS_STAT
+ err = unix.Statx(unix.AT_FDCWD, "file1", flags, unix.STATX_ALL, &stx)
+ if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+ if uint32(stx.Mode) != st.Mode {
+ t.Errorf("Statx: returned stat mode does not match Stat")
+ }
+
+ ctime := unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)}
+ mtime := unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)}
+
+ if stx.Ctime != ctime {
+ t.Errorf("Statx: returned stat ctime does not match Stat")
+ }
+ if stx.Mtime != mtime {
+ t.Errorf("Statx: returned stat mtime does not match Stat")
+ }
+
+ err = os.Symlink("file1", "symlink1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = unix.Lstat("symlink1", &st)
+ if err != nil {
+ t.Fatalf("Lstat: %v", err)
+ }
+
+ err = unix.Statx(unix.AT_FDCWD, "symlink1", flags, unix.STATX_BASIC_STATS, &stx)
+ if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+	// follow symlink, expect a regular file
+ if stx.Mode&unix.S_IFREG == 0 {
+ t.Errorf("Statx: didn't follow symlink")
+ }
+
+ err = unix.Statx(unix.AT_FDCWD, "symlink1", flags|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ALL, &stx)
+ if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+	// don't follow symlink (AT_SYMLINK_NOFOLLOW), expect a symlink
+ if stx.Mode&unix.S_IFLNK == 0 {
+ t.Errorf("Statx: unexpectedly followed symlink")
+ }
+ if uint32(stx.Mode) != st.Mode {
+ t.Errorf("Statx: returned stat mode does not match Lstat")
+ }
+
+ ctime = unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)}
+ mtime = unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)}
+
+ if stx.Ctime != ctime {
+ t.Errorf("Statx: returned stat ctime does not match Lstat")
+ }
+ if stx.Mtime != mtime {
+ t.Errorf("Statx: returned stat mtime does not match Lstat")
+ }
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On Linux, each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) []string {
+ var result []string
+ off := 0
+ for i, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[off:i]))
+ off = i + 1
+ }
+ }
+ return result
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_test.go b/vendor/golang.org/x/sys/unix/syscall_solaris_test.go
new file mode 100644
index 0000000..57dba88
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_test.go
@@ -0,0 +1,55 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package unix_test
+
+import (
+ "os/exec"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestSelect(t *testing.T) {
+ err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0})
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ dur := 150 * time.Millisecond
+ tv := unix.NsecToTimeval(int64(dur))
+ start := time.Now()
+ err = unix.Select(0, nil, nil, nil, &tv)
+ took := time.Since(start)
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ if took < dur {
+ t.Errorf("Select: timeout should have been at least %v, got %v", dur, took)
+ }
+}
+
+func TestStatvfs(t *testing.T) {
+ if err := unix.Statvfs("", nil); err == nil {
+ t.Fatal(`Statvfs("") expected failure`)
+ }
+
+ statvfs := unix.Statvfs_t{}
+ if err := unix.Statvfs("/", &statvfs); err != nil {
+ t.Errorf(`Statvfs("/") failed: %v`, err)
+ }
+
+ if t.Failed() {
+ mount, err := exec.Command("mount").CombinedOutput()
+ if err != nil {
+ t.Logf("mount: %v\n%s", err, mount)
+ } else {
+ t.Logf("mount: %s", mount)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_test.go b/vendor/golang.org/x/sys/unix/syscall_test.go
new file mode 100644
index 0000000..a8eef7c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_test.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func testSetGetenv(t *testing.T, key, value string) {
+ err := unix.Setenv(key, value)
+ if err != nil {
+ t.Fatalf("Setenv failed to set %q: %v", value, err)
+ }
+ newvalue, found := unix.Getenv(key)
+ if !found {
+ t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value)
+ }
+ if newvalue != value {
+ t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value)
+ }
+}
+
+func TestEnv(t *testing.T) {
+ testSetGetenv(t, "TESTENV", "AVALUE")
+ // make sure TESTENV gets set to "", not deleted
+ testSetGetenv(t, "TESTENV", "")
+}
+
+func TestItoa(t *testing.T) {
+ // Make most negative integer: 0x8000...
+ i := 1
+ for i<<1 != 0 {
+ i <<= 1
+ }
+ if i >= 0 {
+ t.Fatal("bad math")
+ }
+ s := unix.Itoa(i)
+ f := fmt.Sprint(i)
+ if s != f {
+ t.Fatalf("itoa(%d) = %s, want %s", i, s, f)
+ }
+}
+
+func TestUname(t *testing.T) {
+ var utsname unix.Utsname
+ err := unix.Uname(&utsname)
+ if err != nil {
+ t.Fatalf("Uname: %v", err)
+ }
+
+ t.Logf("OS: %s/%s %s", utsname.Sysname[:], utsname.Machine[:], utsname.Release[:])
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go
new file mode 100644
index 0000000..ad09716
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_unix_test.go
@@ -0,0 +1,639 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "syscall"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Tests that below functions, structures and constants are consistent
+// on all Unix-like systems.
+func _() {
+ // program scheduling priority functions and constants
+ var (
+ _ func(int, int, int) error = unix.Setpriority
+ _ func(int, int) (int, error) = unix.Getpriority
+ )
+ const (
+ _ int = unix.PRIO_USER
+ _ int = unix.PRIO_PROCESS
+ _ int = unix.PRIO_PGRP
+ )
+
+ // termios constants
+ const (
+ _ int = unix.TCIFLUSH
+ _ int = unix.TCIOFLUSH
+ _ int = unix.TCOFLUSH
+ )
+
+ // fcntl file locking structure and constants
+ var (
+ _ = unix.Flock_t{
+ Type: int16(0),
+ Whence: int16(0),
+ Start: int64(0),
+ Len: int64(0),
+ Pid: int32(0),
+ }
+ )
+ const (
+ _ = unix.F_GETLK
+ _ = unix.F_SETLK
+ _ = unix.F_SETLKW
+ )
+}
+
+func TestErrnoSignalName(t *testing.T) {
+ testErrors := []struct {
+ num syscall.Errno
+ name string
+ }{
+ {syscall.EPERM, "EPERM"},
+ {syscall.EINVAL, "EINVAL"},
+ {syscall.ENOENT, "ENOENT"},
+ }
+
+ for _, te := range testErrors {
+ t.Run(fmt.Sprintf("%d/%s", te.num, te.name), func(t *testing.T) {
+ e := unix.ErrnoName(te.num)
+ if e != te.name {
+ t.Errorf("ErrnoName(%d) returned %s, want %s", te.num, e, te.name)
+ }
+ })
+ }
+
+ testSignals := []struct {
+ num syscall.Signal
+ name string
+ }{
+ {syscall.SIGHUP, "SIGHUP"},
+ {syscall.SIGPIPE, "SIGPIPE"},
+ {syscall.SIGSEGV, "SIGSEGV"},
+ }
+
+ for _, ts := range testSignals {
+ t.Run(fmt.Sprintf("%d/%s", ts.num, ts.name), func(t *testing.T) {
+ s := unix.SignalName(ts.num)
+ if s != ts.name {
+ t.Errorf("SignalName(%d) returned %s, want %s", ts.num, s, ts.name)
+ }
+ })
+ }
+}
+
+// TestFcntlFlock tests whether the file locking structure matches
+// the calling convention of each kernel.
+func TestFcntlFlock(t *testing.T) {
+ name := filepath.Join(os.TempDir(), "TestFcntlFlock")
+ fd, err := unix.Open(name, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0)
+ if err != nil {
+ t.Fatalf("Open failed: %v", err)
+ }
+ defer unix.Unlink(name)
+ defer unix.Close(fd)
+ flock := unix.Flock_t{
+ Type: unix.F_RDLCK,
+ Start: 0, Len: 0, Whence: 1,
+ }
+ if err := unix.FcntlFlock(uintptr(fd), unix.F_GETLK, &flock); err != nil {
+ t.Fatalf("FcntlFlock failed: %v", err)
+ }
+}
+
+// TestPassFD tests passing a file descriptor over a Unix socket.
+//
+// This test involves both a parent and a child process. The parent
+// process is invoked as a normal test, with "go test", which then
+// runs the child process by running the current test binary with args
+// "-test.run=^TestPassFD$" and an environment variable used to signal
+// that the test should become the child process instead.
+func TestPassFD(t *testing.T) {
+ if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
+ t.Skip("cannot exec subprocess on iOS, skipping test")
+ }
+
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ passFDChild()
+ return
+ }
+
+ tempDir, err := ioutil.TempDir("", "TestPassFD")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
+ if err != nil {
+ t.Fatalf("Socketpair: %v", err)
+ }
+ defer unix.Close(fds[0])
+ defer unix.Close(fds[1])
+ writeFile := os.NewFile(uintptr(fds[0]), "child-writes")
+ readFile := os.NewFile(uintptr(fds[1]), "parent-reads")
+ defer writeFile.Close()
+ defer readFile.Close()
+
+ cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir)
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ if lp := os.Getenv("LD_LIBRARY_PATH"); lp != "" {
+ cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+lp)
+ }
+ cmd.ExtraFiles = []*os.File{writeFile}
+
+ out, err := cmd.CombinedOutput()
+ if len(out) > 0 || err != nil {
+ t.Fatalf("child process: %q, %v", out, err)
+ }
+
+ c, err := net.FileConn(readFile)
+ if err != nil {
+ t.Fatalf("FileConn: %v", err)
+ }
+ defer c.Close()
+
+ uc, ok := c.(*net.UnixConn)
+ if !ok {
+ t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c)
+ }
+
+ buf := make([]byte, 32) // expect 1 byte
+ oob := make([]byte, 32) // expect 24 bytes
+ closeUnix := time.AfterFunc(5*time.Second, func() {
+ t.Logf("timeout reading from unix socket")
+ uc.Close()
+ })
+ _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
+ if err != nil {
+ t.Fatalf("ReadMsgUnix: %v", err)
+ }
+ closeUnix.Stop()
+
+ scms, err := unix.ParseSocketControlMessage(oob[:oobn])
+ if err != nil {
+ t.Fatalf("ParseSocketControlMessage: %v", err)
+ }
+ if len(scms) != 1 {
+ t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms)
+ }
+ scm := scms[0]
+ gotFds, err := unix.ParseUnixRights(&scm)
+ if err != nil {
+ t.Fatalf("unix.ParseUnixRights: %v", err)
+ }
+ if len(gotFds) != 1 {
+ t.Fatalf("wanted 1 fd; got %#v", gotFds)
+ }
+
+ f := os.NewFile(uintptr(gotFds[0]), "fd-from-child")
+ defer f.Close()
+
+ got, err := ioutil.ReadAll(f)
+ want := "Hello from child process!\n"
+ if string(got) != want {
+ t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want)
+ }
+}
+
+// passFDChild is the child process used by TestPassFD.
+func passFDChild() {
+ defer os.Exit(0)
+
+ // Look for our fd. It should be fd 3, but we work around an fd leak
+ // bug here (http://golang.org/issue/2603) to let it be elsewhere.
+ var uc *net.UnixConn
+ for fd := uintptr(3); fd <= 10; fd++ {
+ f := os.NewFile(fd, "unix-conn")
+ var ok bool
+ netc, _ := net.FileConn(f)
+ uc, ok = netc.(*net.UnixConn)
+ if ok {
+ break
+ }
+ }
+ if uc == nil {
+ fmt.Println("failed to find unix fd")
+ return
+ }
+
+ // Make a file f to send to our parent process on uc.
+ // We make it in tempDir, which our parent will clean up.
+ flag.Parse()
+ tempDir := flag.Arg(0)
+ f, err := ioutil.TempFile(tempDir, "")
+ if err != nil {
+ fmt.Printf("TempFile: %v", err)
+ return
+ }
+
+ f.Write([]byte("Hello from child process!\n"))
+ f.Seek(0, 0)
+
+ rights := unix.UnixRights(int(f.Fd()))
+ dummyByte := []byte("x")
+ n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil)
+ if err != nil {
+ fmt.Printf("WriteMsgUnix: %v", err)
+ return
+ }
+ if n != 1 || oobn != len(rights) {
+ fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights))
+ return
+ }
+}
+
+// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage,
+// and ParseUnixRights are able to successfully round-trip lists of file descriptors.
+func TestUnixRightsRoundtrip(t *testing.T) {
+ testCases := [...][][]int{
+ {{42}},
+ {{1, 2}},
+ {{3, 4, 5}},
+ {{}},
+ {{1, 2}, {3, 4, 5}, {}, {7}},
+ }
+ for _, testCase := range testCases {
+ b := []byte{}
+ var n int
+ for _, fds := range testCase {
+ // Last assignment to n wins
+ n = len(b) + unix.CmsgLen(4*len(fds))
+ b = append(b, unix.UnixRights(fds...)...)
+ }
+ // Truncate b
+ b = b[:n]
+
+ scms, err := unix.ParseSocketControlMessage(b)
+ if err != nil {
+ t.Fatalf("ParseSocketControlMessage: %v", err)
+ }
+ if len(scms) != len(testCase) {
+ t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms)
+ }
+ for i, scm := range scms {
+ gotFds, err := unix.ParseUnixRights(&scm)
+ if err != nil {
+ t.Fatalf("ParseUnixRights: %v", err)
+ }
+ wantFds := testCase[i]
+ if len(gotFds) != len(wantFds) {
+ t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds)
+ }
+ for j, fd := range gotFds {
+ if fd != wantFds[j] {
+ t.Fatalf("expected fd %v, got %v", wantFds[j], fd)
+ }
+ }
+ }
+ }
+}
+
+func TestRlimit(t *testing.T) {
+ var rlimit, zero unix.Rlimit
+ err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit)
+ if err != nil {
+ t.Fatalf("Getrlimit: save failed: %v", err)
+ }
+ if zero == rlimit {
+ t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit)
+ }
+ set := rlimit
+ set.Cur = set.Max - 1
+ err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set)
+ if err != nil {
+ t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
+ }
+ var get unix.Rlimit
+ err = unix.Getrlimit(unix.RLIMIT_NOFILE, &get)
+ if err != nil {
+ t.Fatalf("Getrlimit: get failed: %v", err)
+ }
+ set = rlimit
+ set.Cur = set.Max - 1
+ if set != get {
+ // Seems like Darwin requires some privilege to
+ // increase the soft limit of rlimit sandbox, though
+ // Setrlimit never reports an error.
+ switch runtime.GOOS {
+ case "darwin":
+ default:
+ t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get)
+ }
+ }
+ err = unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit)
+ if err != nil {
+ t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err)
+ }
+}
+
+func TestSeekFailure(t *testing.T) {
+ _, err := unix.Seek(-1, 0, 0)
+ if err == nil {
+ t.Fatalf("Seek(-1, 0, 0) did not fail")
+ }
+ str := err.Error() // used to crash on Linux
+ t.Logf("Seek: %v", str)
+ if str == "" {
+ t.Fatalf("Seek(-1, 0, 0) return error with empty message")
+ }
+}
+
+func TestDup(t *testing.T) {
+ file, err := ioutil.TempFile("", "TestDup")
+ if err != nil {
+ t.Fatalf("Tempfile failed: %v", err)
+ }
+ defer os.Remove(file.Name())
+ defer file.Close()
+ f := int(file.Fd())
+
+ newFd, err := unix.Dup(f)
+ if err != nil {
+ t.Fatalf("Dup: %v", err)
+ }
+
+ err = unix.Dup2(newFd, newFd+1)
+ if err != nil {
+ t.Fatalf("Dup2: %v", err)
+ }
+
+ b1 := []byte("Test123")
+ b2 := make([]byte, 7)
+ _, err = unix.Write(newFd+1, b1)
+ if err != nil {
+ t.Fatalf("Write to dup2 fd failed: %v", err)
+ }
+ _, err = unix.Seek(f, 0, 0)
+ if err != nil {
+ t.Fatalf("Seek failed: %v", err)
+ }
+ _, err = unix.Read(f, b2)
+ if err != nil {
+ t.Fatalf("Read back failed: %v", err)
+ }
+ if string(b1) != string(b2) {
+ t.Errorf("Dup: stdout write not in file, expected %v, got %v", string(b1), string(b2))
+ }
+}
+
+func TestPoll(t *testing.T) {
+ if runtime.GOOS == "android" ||
+ (runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64")) {
+ t.Skip("mkfifo syscall is not available on android and iOS, skipping test")
+ }
+
+ f, cleanup := mktmpfifo(t)
+ defer cleanup()
+
+ const timeout = 100
+
+ ok := make(chan bool, 1)
+ go func() {
+ select {
+ case <-time.After(10 * timeout * time.Millisecond):
+ t.Errorf("Poll: failed to timeout after %d milliseconds", 10*timeout)
+ case <-ok:
+ }
+ }()
+
+ fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}}
+ n, err := unix.Poll(fds, timeout)
+ ok <- true
+ if err != nil {
+ t.Errorf("Poll: unexpected error: %v", err)
+ return
+ }
+ if n != 0 {
+ t.Errorf("Poll: wrong number of events: got %v, expected %v", n, 0)
+ return
+ }
+}
+
+// TestGetwd checks that unix.Getwd reports the directory previously set
+// with os.Chdir, iterating over directories chosen per-platform so that
+// the paths involved are not symlinks.
+func TestGetwd(t *testing.T) {
+	// Keep a handle on the starting directory so we can fchdir back.
+	fd, err := os.Open(".")
+	if err != nil {
+		t.Fatalf("Open .: %s", err)
+	}
+	defer fd.Close()
+	// These are chosen carefully not to be symlinks on a Mac
+	// (unlike, say, /var, /etc)
+	dirs := []string{"/", "/usr/bin"}
+	switch runtime.GOOS {
+	case "android":
+		dirs = []string{"/", "/system/bin"}
+	case "darwin":
+		switch runtime.GOARCH {
+		case "arm", "arm64":
+			// iOS: use freshly created temp dirs instead of system paths.
+			d1, err := ioutil.TempDir("", "d1")
+			if err != nil {
+				t.Fatalf("TempDir: %v", err)
+			}
+			d2, err := ioutil.TempDir("", "d2")
+			if err != nil {
+				t.Fatalf("TempDir: %v", err)
+			}
+			dirs = []string{d1, d2}
+		}
+	}
+	// Save the original PWD so it can be restored after each Chdir.
+	oldwd := os.Getenv("PWD")
+	for _, d := range dirs {
+		err = os.Chdir(d)
+		if err != nil {
+			t.Fatalf("Chdir: %v", err)
+		}
+		pwd, err := unix.Getwd()
+		if err != nil {
+			t.Fatalf("Getwd in %s: %s", d, err)
+		}
+		os.Setenv("PWD", oldwd)
+		err = fd.Chdir()
+		if err != nil {
+			// We changed the current directory and cannot go back.
+			// Don't let the tests continue; they'll scribble
+			// all over some other directory.
+			fmt.Fprintf(os.Stderr, "fchdir back to dot failed: %s\n", err)
+			os.Exit(1)
+		}
+		if pwd != d {
+			t.Fatalf("Getwd returned %q want %q", pwd, d)
+		}
+	}
+}
+
+// TestFstatat verifies that Fstatat relative to AT_FDCWD matches Stat for
+// a regular file, and matches Lstat for a symlink when AT_SYMLINK_NOFOLLOW
+// is passed.
+func TestFstatat(t *testing.T) {
+	defer chtmpdir(t)()
+
+	touch(t, "file1")
+
+	var st1 unix.Stat_t
+	err := unix.Stat("file1", &st1)
+	if err != nil {
+		t.Fatalf("Stat: %v", err)
+	}
+
+	// Fstatat with flags == 0 should behave exactly like Stat.
+	var st2 unix.Stat_t
+	err = unix.Fstatat(unix.AT_FDCWD, "file1", &st2, 0)
+	if err != nil {
+		t.Fatalf("Fstatat: %v", err)
+	}
+
+	if st1 != st2 {
+		t.Errorf("Fstatat: returned stat does not match Stat")
+	}
+
+	err = os.Symlink("file1", "symlink1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = unix.Lstat("symlink1", &st1)
+	if err != nil {
+		t.Fatalf("Lstat: %v", err)
+	}
+
+	// With AT_SYMLINK_NOFOLLOW, Fstatat should stat the link itself,
+	// matching Lstat.
+	err = unix.Fstatat(unix.AT_FDCWD, "symlink1", &st2, unix.AT_SYMLINK_NOFOLLOW)
+	if err != nil {
+		t.Fatalf("Fstatat: %v", err)
+	}
+
+	if st1 != st2 {
+		t.Errorf("Fstatat: returned stat does not match Lstat")
+	}
+}
+
+// TestFchmodat verifies that Fchmodat follows symlinks by default (changing
+// the target file's mode) and, where the platform supports it, changes the
+// symlink itself when AT_SYMLINK_NOFOLLOW is given.
+func TestFchmodat(t *testing.T) {
+	defer chtmpdir(t)()
+
+	touch(t, "file1")
+	err := os.Symlink("file1", "symlink1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Default behavior: chmod through the symlink affects file1.
+	mode := os.FileMode(0444)
+	err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), 0)
+	if err != nil {
+		t.Fatalf("Fchmodat: unexpected error: %v", err)
+	}
+
+	fi, err := os.Stat("file1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if fi.Mode() != mode {
+		t.Errorf("Fchmodat: failed to change file mode: expected %v, got %v", mode, fi.Mode())
+	}
+
+	mode = os.FileMode(0644)
+	didChmodSymlink := true
+	err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+	if err != nil {
+		if (runtime.GOOS == "android" || runtime.GOOS == "linux" || runtime.GOOS == "solaris") && err == unix.EOPNOTSUPP {
+			// Linux and Illumos don't support flags != 0
+			didChmodSymlink = false
+		} else {
+			t.Fatalf("Fchmodat: unexpected error: %v", err)
+		}
+	}
+
+	if !didChmodSymlink {
+		// Didn't change mode of the symlink. On Linux, the permissions
+		// of a symbolic link are always 0777 according to symlink(7)
+		mode = os.FileMode(0777)
+	}
+
+	var st unix.Stat_t
+	err = unix.Lstat("symlink1", &st)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Compare only the permission bits of the symlink's mode.
+	got := os.FileMode(st.Mode & 0777)
+	if got != mode {
+		t.Errorf("Fchmodat: failed to change symlink mode: expected %v, got %v", mode, got)
+	}
+}
+
+// TestMkdev checks that Major and Minor recover the components that were
+// packed into a device number by Mkdev (round-trip property).
+func TestMkdev(t *testing.T) {
+	major := uint32(42)
+	minor := uint32(7)
+	dev := unix.Mkdev(major, minor)
+
+	if unix.Major(dev) != major {
+		t.Errorf("Major(%#x) == %d, want %d", dev, unix.Major(dev), major)
+	}
+	if unix.Minor(dev) != minor {
+		t.Errorf("Minor(%#x) == %d, want %d", dev, unix.Minor(dev), minor)
+	}
+}
+
+// mktmpfifo creates a temporary FIFO named "fifo" in the current working
+// directory, opens it read-write, and returns the open file along with a
+// cleanup function that closes and removes it. Fails the test on error.
+func mktmpfifo(t *testing.T) (*os.File, func()) {
+	err := unix.Mkfifo("fifo", 0666)
+	if err != nil {
+		t.Fatalf("mktmpfifo: failed to create FIFO: %v", err)
+	}
+
+	// O_RDWR avoids blocking on open, which a read-only or write-only
+	// open of a FIFO with no peer would do.
+	f, err := os.OpenFile("fifo", os.O_RDWR, 0666)
+	if err != nil {
+		os.Remove("fifo")
+		t.Fatalf("mktmpfifo: failed to open FIFO: %v", err)
+	}
+
+	return f, func() {
+		f.Close()
+		os.Remove("fifo")
+	}
+}
+
+// utilities taken from os/os_test.go
+
+// touch creates an empty file with the given name, failing the test on
+// any error (including the Close).
+func touch(t *testing.T, name string) {
+	f, err := os.Create(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// chtmpdir changes the working directory to a new temporary directory and
+// provides a cleanup function that restores the previous working directory
+// and removes the temporary one. Used when PWD is read-only.
+func chtmpdir(t *testing.T) func() {
+	oldwd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("chtmpdir: %v", err)
+	}
+	d, err := ioutil.TempDir("", "test")
+	if err != nil {
+		t.Fatalf("chtmpdir: %v", err)
+	}
+	if err := os.Chdir(d); err != nil {
+		t.Fatalf("chtmpdir: %v", err)
+	}
+	return func() {
+		if err := os.Chdir(oldwd); err != nil {
+			t.Fatalf("chtmpdir: %v", err)
+		}
+		os.RemoveAll(d)
+	}
+}
diff --git a/vendor/golang.org/x/sys/unix/timestruct_test.go b/vendor/golang.org/x/sys/unix/timestruct_test.go
new file mode 100644
index 0000000..4215f46
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/timestruct_test.go
@@ -0,0 +1,54 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "testing"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// TestTimeToTimespec exercises TimeToTimespec across times inside and
+// outside the 32-bit time_t range. Cases marked valid=false are only
+// expected to fail on targets whose Timespec.Sec is 32 bits.
+func TestTimeToTimespec(t *testing.T) {
+	timeTests := []struct {
+		time  time.Time
+		valid bool
+	}{
+		{time.Unix(0, 0), true},
+		{time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), true},
+		{time.Date(2262, time.December, 31, 23, 0, 0, 0, time.UTC), false},
+		{time.Unix(0x7FFFFFFF, 0), true},
+		{time.Unix(0x80000000, 0), false},
+		{time.Unix(0x7FFFFFFF, 1000000000), false},
+		{time.Unix(0x7FFFFFFF, 999999999), true},
+		{time.Unix(-0x80000000, 0), true},
+		{time.Unix(-0x80000001, 0), false},
+		{time.Date(2038, time.January, 19, 3, 14, 7, 0, time.UTC), true},
+		{time.Date(2038, time.January, 19, 3, 14, 8, 0, time.UTC), false},
+		{time.Date(1901, time.December, 13, 20, 45, 52, 0, time.UTC), true},
+		{time.Date(1901, time.December, 13, 20, 45, 51, 0, time.UTC), false},
+	}
+
+	// Currently all targets have either int32 or int64 for Timespec.Sec.
+	// If there were a new target with unsigned or floating point type for
+	// it, this test must be adjusted.
+	have64BitTime := (unsafe.Sizeof(unix.Timespec{}.Sec) == 8)
+	for _, tt := range timeTests {
+		ts, err := unix.TimeToTimespec(tt.time)
+		// On 64-bit targets every test time fits, so all cases are valid.
+		tt.valid = tt.valid || have64BitTime
+		if tt.valid && err != nil {
+			t.Errorf("TimeToTimespec(%v): %v", tt.time, err)
+		}
+		if err == nil {
+			// Round-trip: the Timespec must denote the same instant.
+			tstime := time.Unix(int64(ts.Sec), int64(ts.Nsec))
+			if !tstime.Equal(tt.time) {
+				t.Errorf("TimeToTimespec(%v) is the time %v", tt.time, tstime)
+			}
+		}
+	}
+}
diff --git a/vendor/golang.org/x/sys/unix/xattr_test.go b/vendor/golang.org/x/sys/unix/xattr_test.go
new file mode 100644
index 0000000..b8b28d0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/xattr_test.go
@@ -0,0 +1,119 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux
+
+package unix_test
+
+import (
+ "os"
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+// TestXattr exercises the extended-attribute wrappers: Setxattr, Listxattr,
+// Getxattr and Removexattr on a regular file, plus the L-variants'
+// error behavior on a nonexistent path and on a symlink. Skips when the
+// filesystem has no xattr support.
+func TestXattr(t *testing.T) {
+	defer chtmpdir(t)()
+
+	f := "xattr1"
+	touch(t, f)
+
+	xattrName := "user.test"
+	xattrDataSet := "gopher"
+	err := unix.Setxattr(f, xattrName, []byte(xattrDataSet), 0)
+	if err == unix.ENOTSUP || err == unix.EOPNOTSUPP {
+		t.Skip("filesystem does not support extended attributes, skipping test")
+	} else if err != nil {
+		t.Fatalf("Setxattr: %v", err)
+	}
+
+	// find size
+	size, err := unix.Listxattr(f, nil)
+	if err != nil {
+		t.Fatalf("Listxattr: %v", err)
+	}
+
+	if size <= 0 {
+		t.Fatalf("Listxattr returned an empty list of attributes")
+	}
+
+	buf := make([]byte, size)
+	read, err := unix.Listxattr(f, buf)
+	if err != nil {
+		t.Fatalf("Listxattr: %v", err)
+	}
+
+	// Split the NUL-separated buffer into individual attribute names.
+	xattrs := stringsFromByteSlice(buf[:read])
+
+	xattrWant := xattrName
+	if runtime.GOOS == "freebsd" {
+		// On FreeBSD, the namespace is stored separately from the xattr
+		// name and Listxattr doesn't return the namespace prefix.
+		xattrWant = strings.TrimPrefix(xattrWant, "user.")
+	}
+	found := false
+	for _, name := range xattrs {
+		if name == xattrWant {
+			found = true
+		}
+	}
+
+	if !found {
+		t.Errorf("Listxattr did not return previously set attribute '%s'", xattrName)
+	}
+
+	// find size
+	size, err = unix.Getxattr(f, xattrName, nil)
+	if err != nil {
+		t.Fatalf("Getxattr: %v", err)
+	}
+
+	if size <= 0 {
+		t.Fatalf("Getxattr returned an empty attribute")
+	}
+
+	xattrDataGet := make([]byte, size)
+	_, err = unix.Getxattr(f, xattrName, xattrDataGet)
+	if err != nil {
+		t.Fatalf("Getxattr: %v", err)
+	}
+
+	got := string(xattrDataGet)
+	if got != xattrDataSet {
+		t.Errorf("Getxattr: expected attribute value %s, got %s", xattrDataSet, got)
+	}
+
+	err = unix.Removexattr(f, xattrName)
+	if err != nil {
+		t.Fatalf("Removexattr: %v", err)
+	}
+
+	// The L-variants on a nonexistent file must fail with ENOENT.
+	n := "nonexistent"
+	err = unix.Lsetxattr(n, xattrName, []byte(xattrDataSet), 0)
+	if err != unix.ENOENT {
+		t.Errorf("Lsetxattr: expected %v on non-existent file, got %v", unix.ENOENT, err)
+	}
+
+	_, err = unix.Lgetxattr(n, xattrName, nil)
+	if err != unix.ENOENT {
+		t.Errorf("Lgetxattr: %v", err)
+	}
+
+	s := "symlink1"
+	err = os.Symlink(n, s)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = unix.Lsetxattr(s, xattrName, []byte(xattrDataSet), 0)
+	if err != nil {
+		// Linux and Android don't support xattrs on symlinks according
+		// to xattr(7), so just test that we get the proper error.
+		if (runtime.GOOS != "linux" && runtime.GOOS != "android") || err != unix.EPERM {
+			t.Fatalf("Lsetxattr: %v", err)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_test.go b/vendor/golang.org/x/sys/windows/syscall_test.go
new file mode 100644
index 0000000..d7009e4
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/syscall_test.go
@@ -0,0 +1,53 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package windows_test
+
+import (
+ "syscall"
+ "testing"
+
+ "golang.org/x/sys/windows"
+)
+
+// testSetGetenv sets the environment variable key to value via
+// windows.Setenv and verifies that windows.Getenv reads the same value back.
+func testSetGetenv(t *testing.T, key, value string) {
+	err := windows.Setenv(key, value)
+	if err != nil {
+		t.Fatalf("Setenv failed to set %q: %v", value, err)
+	}
+	newvalue, found := windows.Getenv(key)
+	if !found {
+		t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value)
+	}
+	if newvalue != value {
+		t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value)
+	}
+}
+
+// TestEnv checks round-tripping of environment variables, including that
+// setting an empty value keeps the variable defined rather than deleting it.
+func TestEnv(t *testing.T) {
+	testSetGetenv(t, "TESTENV", "AVALUE")
+	// make sure TESTENV gets set to "", not deleted
+	testSetGetenv(t, "TESTENV", "")
+}
+
+// TestGetProcAddressByOrdinal resolves a DLL export by ordinal rather than
+// by name and calls it to verify the resolved address is usable.
+func TestGetProcAddressByOrdinal(t *testing.T) {
+	// Attempt calling shlwapi.dll:IsOS, resolving it by ordinal, as
+	// suggested in
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/bb773795.aspx
+	h, err := windows.LoadLibrary("shlwapi.dll")
+	if err != nil {
+		t.Fatalf("Failed to load shlwapi.dll: %s", err)
+	}
+	// Ordinal 437 is IsOS per the MSDN page above.
+	procIsOS, err := windows.GetProcAddressByOrdinal(h, 437)
+	if err != nil {
+		t.Fatalf("Could not find shlwapi.dll:IsOS by ordinal: %s", err)
+	}
+	const OS_NT = 1
+	r, _, _ := syscall.Syscall(procIsOS, 1, OS_NT, 0, 0)
+	if r == 0 {
+		t.Error("shlwapi.dll:IsOS(OS_NT) returned 0, expected non-zero value")
+	}
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows_test.go b/vendor/golang.org/x/sys/windows/syscall_windows_test.go
new file mode 100644
index 0000000..0e27464
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/syscall_windows_test.go
@@ -0,0 +1,113 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows_test
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// TestWin32finddata guards against FindFirstFile writing past the end of a
+// Win32finddata struct: a sentinel byte is placed immediately after the
+// struct and must be unchanged after the call.
+func TestWin32finddata(t *testing.T) {
+	dir, err := ioutil.TempDir("", "go-build")
+	if err != nil {
+		t.Fatalf("failed to create temp directory: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	// Use a long name so the find data fields are fully exercised.
+	path := filepath.Join(dir, "long_name.and_extension")
+	f, err := os.Create(path)
+	if err != nil {
+		t.Fatalf("failed to create %v: %v", path, err)
+	}
+	f.Close()
+
+	type X struct {
+		fd  windows.Win32finddata
+		got byte
+		pad [10]byte // to protect ourselves
+
+	}
+	var want byte = 2 // it is unlikely to have this character in the filename
+	x := X{got: want}
+
+	pathp, _ := windows.UTF16PtrFromString(path)
+	h, err := windows.FindFirstFile(pathp, &(x.fd))
+	if err != nil {
+		t.Fatalf("FindFirstFile failed: %v", err)
+	}
+	err = windows.FindClose(h)
+	if err != nil {
+		t.Fatalf("FindClose failed: %v", err)
+	}
+
+	// If the sentinel changed, FindFirstFile overran the struct.
+	if x.got != want {
+		t.Fatalf("memory corruption: want=%d got=%d", want, x.got)
+	}
+}
+
+// TestFormatMessage provokes a pdh.dll error by opening a query against a
+// bogus data source, then checks that FormatMessage can render the error
+// code using the message table embedded in that DLL.
+func TestFormatMessage(t *testing.T) {
+	dll := windows.MustLoadDLL("pdh.dll")
+
+	pdhOpenQuery := func(datasrc *uint16, userdata uint32, query *windows.Handle) (errno uintptr) {
+		r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhOpenQueryW").Addr(), 3, uintptr(unsafe.Pointer(datasrc)), uintptr(userdata), uintptr(unsafe.Pointer(query)))
+		return r0
+	}
+
+	pdhCloseQuery := func(query windows.Handle) (errno uintptr) {
+		r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhCloseQuery").Addr(), 1, uintptr(query), 0, 0)
+		return r0
+	}
+
+	var q windows.Handle
+	name, err := windows.UTF16PtrFromString("no_such_source")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The open is expected to fail; the resulting errno is what we format.
+	errno := pdhOpenQuery(name, 0, &q)
+	if errno == 0 {
+		pdhCloseQuery(q)
+		t.Fatal("PdhOpenQuery succeeded, but expected to fail.")
+	}
+
+	// FORMAT_MESSAGE_FROM_HMODULE makes FormatMessage look up the message
+	// in pdh.dll's own message table.
+	const flags uint32 = syscall.FORMAT_MESSAGE_FROM_HMODULE | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS
+	buf := make([]uint16, 300)
+	_, err = windows.FormatMessage(flags, uintptr(dll.Handle), uint32(errno), 0, buf, nil)
+	if err != nil {
+		t.Fatalf("FormatMessage for handle=%x and errno=%x failed: %v", dll.Handle, errno, err)
+	}
+}
+
+// abort panics with the failing function's name and error; used by the
+// Example functions below where testing.T is not available.
+func abort(funcname string, err error) {
+	panic(funcname + " failed: " + err.Error())
+}
+
+// ExampleLoadLibrary demonstrates loading kernel32.dll, resolving
+// GetVersion by name, calling it, and unpacking the packed version number.
+func ExampleLoadLibrary() {
+	h, err := windows.LoadLibrary("kernel32.dll")
+	if err != nil {
+		abort("LoadLibrary", err)
+	}
+	defer windows.FreeLibrary(h)
+	proc, err := windows.GetProcAddress(h, "GetVersion")
+	if err != nil {
+		abort("GetProcAddress", err)
+	}
+	r, _, _ := syscall.Syscall(uintptr(proc), 0, 0, 0, 0)
+	// GetVersion packs major in the low byte, minor in the next byte,
+	// and the build number in the high word.
+	major := byte(r)
+	minor := uint8(r >> 8)
+	build := uint16(r >> 16)
+	print("windows version ", major, ".", minor, " (Build ", build, ")\n")
+}
+
+// TestTOKEN_ALL_ACCESS pins the value of the TOKEN_ALL_ACCESS constant.
+func TestTOKEN_ALL_ACCESS(t *testing.T) {
+	if windows.TOKEN_ALL_ACCESS != 0xF01FF {
+		t.Errorf("TOKEN_ALL_ACCESS = %x, want 0xF01FF", windows.TOKEN_ALL_ACCESS)
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go b/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go
new file mode 100644
index 0000000..1815512
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Incomplete source tree on Android.
+
+// +build !android
+
+package buildutil_test
+
+import (
+ "go/build"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+// TestAllPackages checks that buildutil.AllPackages finds a plausible
+// number of packages and that a few well-known import paths are present.
+func TestAllPackages(t *testing.T) {
+	if runtime.Compiler == "gccgo" {
+		t.Skip("gccgo has no standard packages")
+	}
+
+	all := buildutil.AllPackages(&build.Default)
+
+	// Index the result for membership tests.
+	set := make(map[string]bool)
+	for _, pkg := range all {
+		set[pkg] = true
+	}
+
+	const wantAtLeast = 250
+	if len(all) < wantAtLeast {
+		t.Errorf("Found only %d packages, want at least %d", len(all), wantAtLeast)
+	}
+
+	for _, want := range []string{"fmt", "crypto/sha256", "golang.org/x/tools/go/buildutil"} {
+		if !set[want] {
+			t.Errorf("Package %q not found; got %s", want, all)
+		}
+	}
+}
+
+// TestExpandPatterns exercises buildutil.ExpandPatterns over a fake package
+// tree, covering literal paths, "..." wildcards, negation with "-", and
+// trailing-slash normalization.
+func TestExpandPatterns(t *testing.T) {
+	// Build a fake GOPATH containing only these (empty) packages.
+	tree := make(map[string]map[string]string)
+	for _, pkg := range []string{
+		"encoding",
+		"encoding/xml",
+		"encoding/hex",
+		"encoding/json",
+		"fmt",
+	} {
+		tree[pkg] = make(map[string]string)
+	}
+	ctxt := buildutil.FakeContext(tree)
+
+	for _, test := range []struct {
+		patterns string
+		want     string
+	}{
+		{"", ""},
+		{"fmt", "fmt"},
+		{"nosuchpkg", "nosuchpkg"},
+		{"nosuchdir/...", ""},
+		{"...", "encoding encoding/hex encoding/json encoding/xml fmt"},
+		{"encoding/... -encoding/xml", "encoding encoding/hex encoding/json"},
+		{"... -encoding/...", "fmt"},
+		{"encoding", "encoding"},
+		{"encoding/", "encoding"},
+	} {
+		var pkgs []string
+		for pkg := range buildutil.ExpandPatterns(ctxt, strings.Fields(test.patterns)) {
+			pkgs = append(pkgs, pkg)
+		}
+		// Sort for a deterministic comparison; map order is random.
+		sort.Strings(pkgs)
+		got := strings.Join(pkgs, " ")
+		if got != test.want {
+			t.Errorf("ExpandPatterns(%s) = %s, want %s",
+				test.patterns, got, test.want)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay_test.go b/vendor/golang.org/x/tools/go/buildutil/overlay_test.go
new file mode 100644
index 0000000..92e2258
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/overlay_test.go
@@ -0,0 +1,70 @@
+package buildutil_test
+
+import (
+ "go/build"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+// TestParseOverlayArchive checks parsing of the overlay archive format
+// ("filename\nlength\ncontents" records): a single file, a truncated
+// payload (error), and two concatenated files.
+func TestParseOverlayArchive(t *testing.T) {
+	var tt = []struct {
+		in     string
+		out    map[string][]byte
+		hasErr bool
+	}{
+		{
+			"a.go\n5\n12345",
+			map[string][]byte{"a.go": []byte("12345")},
+			false,
+		},
+		{
+			// Declared length 5 but only 4 bytes of content: error.
+			"a.go\n5\n1234",
+			nil,
+			true,
+		},
+		{
+			"a.go\n5\n12345b.go\n4\n1234",
+			map[string][]byte{"a.go": []byte("12345"), "b.go": []byte("1234")},
+			false,
+		},
+	}
+
+	for _, test := range tt {
+		got, err := buildutil.ParseOverlayArchive(strings.NewReader(test.in))
+		if err == nil && test.hasErr {
+			t.Errorf("expected error for %q", test.in)
+		}
+		if err != nil && !test.hasErr {
+			t.Errorf("unexpected error %v for %q", err, test.in)
+		}
+		if !reflect.DeepEqual(got, test.out) {
+			t.Errorf("got %#v, want %#v", got, test.out)
+		}
+	}
+}
+
+// TestOverlay verifies that OverlayContext serves overlaid file contents
+// through OpenFile, including for a path variant with a doubled slash.
+func TestOverlay(t *testing.T) {
+	ctx := &build.Default
+	ov := map[string][]byte{
+		"/somewhere/a.go": []byte("file contents"),
+	}
+	// Both spellings should resolve to the same overlaid file.
+	names := []string{"/somewhere/a.go", "/somewhere//a.go"}
+	ctx = buildutil.OverlayContext(ctx, ov)
+	for _, name := range names {
+		f, err := buildutil.OpenFile(ctx, name)
+		if err != nil {
+			t.Errorf("unexpected error %v", err)
+		}
+		b, err := ioutil.ReadAll(f)
+		if err != nil {
+			t.Errorf("unexpected error %v", err)
+		}
+		if got, expected := string(b), string(ov["/somewhere/a.go"]); got != expected {
+			t.Errorf("read %q, expected %q", got, expected)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/tags_test.go b/vendor/golang.org/x/tools/go/buildutil/tags_test.go
new file mode 100644
index 0000000..0fc2618
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/tags_test.go
@@ -0,0 +1,28 @@
+package buildutil_test
+
+import (
+ "flag"
+ "go/build"
+ "reflect"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+// TestTags checks that TagsFlag parses a quoted -tags argument into
+// build.Context.BuildTags and leaves remaining args untouched.
+func TestTags(t *testing.T) {
+	f := flag.NewFlagSet("TestTags", flag.PanicOnError)
+	var ctxt build.Context
+	f.Var((*buildutil.TagsFlag)(&ctxt.BuildTags), "tags", buildutil.TagsFlagDoc)
+	// Mixed single/double quoting, including an embedded quoted string.
+	f.Parse([]string{"-tags", ` 'one'"two" 'three "four"'`, "rest"})
+
+	// BuildTags
+	want := []string{"one", "two", "three \"four\""}
+	if !reflect.DeepEqual(ctxt.BuildTags, want) {
+		t.Errorf("BuildTags = %q, want %q", ctxt.BuildTags, want)
+	}
+
+	// Args()
+	if want := []string{"rest"}; !reflect.DeepEqual(f.Args(), want) {
+		t.Errorf("f.Args() = %q, want %q", f.Args(), want)
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/util_test.go b/vendor/golang.org/x/tools/go/buildutil/util_test.go
new file mode 100644
index 0000000..c72d59d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/util_test.go
@@ -0,0 +1,85 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil_test
+
+import (
+ "go/build"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+// TestContainingPackage maps file names in GOROOT and GOPATH back to their
+// import paths via buildutil.ContainingPackage, including paths reached
+// through a symlinked GOPATH where the platform supports symlinks.
+func TestContainingPackage(t *testing.T) {
+	if runtime.Compiler == "gccgo" {
+		t.Skip("gccgo has no GOROOT")
+	}
+
+	// unvirtualized:
+	goroot := runtime.GOROOT()
+	gopath := gopathContainingTools(t)
+
+	type Test struct {
+		gopath, filename, wantPkg string
+	}
+
+	tests := []Test{
+		{gopath, goroot + "/src/fmt/print.go", "fmt"},
+		{gopath, goroot + "/src/encoding/json/foo.go", "encoding/json"},
+		{gopath, goroot + "/src/encoding/missing/foo.go", "(not found)"},
+		{gopath, gopath + "/src/golang.org/x/tools/go/buildutil/util_test.go",
+			"golang.org/x/tools/go/buildutil"},
+	}
+
+	if runtime.GOOS != "windows" && runtime.GOOS != "plan9" {
+		// Make a symlink to gopath for test
+		tmp, err := ioutil.TempDir(os.TempDir(), "go")
+		if err != nil {
+			t.Errorf("Unable to create a temporary directory in %s", os.TempDir())
+		}
+
+		defer os.RemoveAll(tmp)
+
+		// symlink between $GOPATH/src and /tmp/go/src
+		// in order to test all possible symlink cases
+		if err := os.Symlink(gopath+"/src", tmp+"/src"); err != nil {
+			t.Fatal(err)
+		}
+		tests = append(tests, []Test{
+			{gopath, tmp + "/src/golang.org/x/tools/go/buildutil/util_test.go", "golang.org/x/tools/go/buildutil"},
+			{tmp, gopath + "/src/golang.org/x/tools/go/buildutil/util_test.go", "golang.org/x/tools/go/buildutil"},
+			{tmp, tmp + "/src/golang.org/x/tools/go/buildutil/util_test.go", "golang.org/x/tools/go/buildutil"},
+		}...)
+	}
+
+	for _, test := range tests {
+		var got string
+		// Copy build.Default so each case can use its own GOPATH.
+		var buildContext = build.Default
+		buildContext.GOPATH = test.gopath
+		bp, err := buildutil.ContainingPackage(&buildContext, ".", test.filename)
+		if err != nil {
+			got = "(not found)"
+		} else {
+			got = bp.ImportPath
+		}
+		if got != test.wantPkg {
+			t.Errorf("ContainingPackage(%q) = %s, want %s", test.filename, got, test.wantPkg)
+		}
+	}
+
+	// TODO(adonovan): test on virtualized GOPATH too.
+}
+
+// gopathContainingTools returns the path of the GOPATH workspace
+// with golang.org/x/tools, or fails the test if it can't locate it.
+func gopathContainingTools(t *testing.T) string {
+	// FindOnly locates the package directory without parsing sources.
+	p, err := build.Import("golang.org/x/tools", "", build.FindOnly)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return p.Root
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/util_windows_test.go b/vendor/golang.org/x/tools/go/buildutil/util_windows_test.go
new file mode 100644
index 0000000..86fe9c7
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/buildutil/util_windows_test.go
@@ -0,0 +1,48 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil_test
+
+import (
+ "fmt"
+ "go/build"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+)
+
+// testContainingPackageCaseFold resolves file to its containing package and
+// returns an error if resolution fails or the import path differs from want.
+func testContainingPackageCaseFold(file, want string) error {
+	bp, err := buildutil.ContainingPackage(&build.Default, ".", file)
+	if err != nil {
+		return err
+	}
+	if got := bp.ImportPath; got != want {
+		return fmt.Errorf("ContainingPackage(%q) = %s, want %s", file, got, want)
+	}
+	return nil
+}
+
+// TestContainingPackageCaseFold checks that ContainingPackage resolves a
+// GOROOT file regardless of the case of the Windows drive letter.
+func TestContainingPackageCaseFold(t *testing.T) {
+	path := filepath.Join(runtime.GOROOT(), `src\fmt\print.go`)
+	err := testContainingPackageCaseFold(path, "fmt")
+	if err != nil {
+		t.Error(err)
+	}
+	// Split off the drive letter ("C:") so it can be re-cased below.
+	vol := filepath.VolumeName(path)
+	if len(vol) != 2 || vol[1] != ':' {
+		t.Fatalf("GOROOT path has unexpected volume name: %v", vol)
+	}
+	rest := path[len(vol):]
+	err = testContainingPackageCaseFold(strings.ToUpper(vol)+rest, "fmt")
+	if err != nil {
+		t.Error(err)
+	}
+	err = testContainingPackageCaseFold(strings.ToLower(vol)+rest, "fmt")
+	if err != nil {
+		t.Error(err)
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/example_test.go b/vendor/golang.org/x/tools/go/gcexportdata/example_test.go
new file mode 100644
index 0000000..b67d55f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/example_test.go
@@ -0,0 +1,122 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+// +build gc
+
+package gcexportdata_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/tools/go/gcexportdata"
+)
+
+// ExampleRead uses gcexportdata.Read to load type information for the
+// "fmt" package from the fmt.a file produced by the gc compiler.
+func ExampleRead() {
+	// Find the export data file.
+	filename, path := gcexportdata.Find("fmt", "")
+	if filename == "" {
+		log.Fatalf("can't find export data for fmt")
+	}
+	fmt.Printf("Package path: %s\n", path)
+	fmt.Printf("Export data: %s\n", filepath.Base(filename))
+
+	// Open and read the file.
+	f, err := os.Open(filename)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+	r, err := gcexportdata.NewReader(f)
+	if err != nil {
+		log.Fatalf("reading export data %s: %v", filename, err)
+	}
+
+	// Decode the export data.
+	fset := token.NewFileSet()
+	imports := make(map[string]*types.Package)
+	pkg, err := gcexportdata.Read(r, fset, imports, path)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Print package information.
+	fmt.Printf("Package members: %s...\n", pkg.Scope().Names()[:5])
+	println := pkg.Scope().Lookup("Println")
+	posn := fset.Position(println.Pos())
+	posn.Line = 123 // make example deterministic
+	fmt.Printf("Println type: %s\n", println.Type())
+	fmt.Printf("Println location: %s\n", slashify(posn))
+
+	// Output:
+	//
+	// Package path: fmt
+	// Export data: fmt.a
+	// Package members: [Errorf Formatter Fprint Fprintf Fprintln]...
+	// Println type: func(a ...interface{}) (n int, err error)
+	// Println location: $GOROOT/src/fmt/print.go:123:1
+}
+
+// ExampleNewImporter demonstrates usage of NewImporter to provide type
+// information for dependencies when type-checking Go source code.
+func ExampleNewImporter() {
+	const src = `package myscanner
+
+// choosing a package that is unlikely to change across releases
+import "text/scanner"
+
+const eof = scanner.EOF
+`
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "myscanner.go", src, 0)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Type-check the file, resolving imports through gcexportdata.
+	packages := make(map[string]*types.Package)
+	imp := gcexportdata.NewImporter(fset, packages)
+	conf := types.Config{Importer: imp}
+	pkg, err := conf.Check("myscanner", fset, []*ast.File{f}, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// object from imported package
+	pi := packages["text/scanner"].Scope().Lookup("EOF")
+	fmt.Printf("const %s.%s %s = %s // %s\n",
+		pi.Pkg().Path(),
+		pi.Name(),
+		pi.Type(),
+		pi.(*types.Const).Val(),
+		slashify(fset.Position(pi.Pos())),
+	)
+
+	// object in source package
+	twopi := pkg.Scope().Lookup("eof")
+	fmt.Printf("const %s %s = %s // %s\n",
+		twopi.Name(),
+		twopi.Type(),
+		twopi.(*types.Const).Val(),
+		slashify(fset.Position(twopi.Pos())),
+	)
+
+	// Output:
+	//
+	// const text/scanner.EOF untyped int = -1 // $GOROOT/src/text/scanner/scanner.go:75:1
+	// const eof untyped int = -1 // myscanner.go:6:7
+}
+
+// slashify normalizes the position's filename to forward slashes so the
+// example output is stable across operating systems.
+func slashify(posn token.Position) token.Position {
+	posn.Filename = filepath.ToSlash(posn.Filename) // for MS Windows portability
+	return posn
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go
new file mode 100644
index 0000000..69133db
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata_test.go
@@ -0,0 +1,41 @@
+package gcexportdata_test
+
+import (
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "testing"
+
+ "golang.org/x/tools/go/gcexportdata"
+)
+
+// Test to ensure that gcexportdata can read files produced by App
+// Engine Go runtime v1.6.
+func TestAppEngine16(t *testing.T) {
+ // Open and read the file.
+ f, err := os.Open("testdata/errors-ae16.a")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ log.Fatalf("reading export data: %v", err)
+ }
+
+ // Decode the export data.
+ fset := token.NewFileSet()
+ imports := make(map[string]*types.Package)
+ pkg, err := gcexportdata.Read(r, fset, imports, "errors")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Print package information.
+ got := pkg.Scope().Lookup("New").Type().String()
+ want := "func(text string) error"
+ if got != want {
+ t.Errorf("New.Type = %s, want %s", got, want)
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go
new file mode 100644
index 0000000..5c3cf2d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go
@@ -0,0 +1,96 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package gcimporter_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/go/internal/gcimporter"
+)
+
+// src is a test program declaring several type aliases, including one
+// referring to an undeclared name so type checking reports an error.
+const src = `
+package p
+
+type (
+	T0 = int32
+	T1 = struct{}
+	T2 = struct{ T1 }
+	Invalid = foo // foo is undeclared
+)
+`
+
+// checkPkg asserts that pkg contains the alias declarations of src: each
+// name must resolve to a TypeName marked as an alias whose type is
+// identical to the expected one. label distinguishes export vs import runs.
+func checkPkg(t *testing.T, pkg *types.Package, label string) {
+	T1 := types.NewStruct(nil, nil)
+	T2 := types.NewStruct([]*types.Var{types.NewField(0, pkg, "T1", T1, true)}, nil)
+
+	for _, test := range []struct {
+		name string
+		typ  types.Type
+	}{
+		{"T0", types.Typ[types.Int32]},
+		{"T1", T1},
+		{"T2", T2},
+		{"Invalid", types.Typ[types.Invalid]},
+	} {
+		obj := pkg.Scope().Lookup(test.name)
+		if obj == nil {
+			t.Errorf("%s: %s not found", label, test.name)
+			continue
+		}
+		tname, _ := obj.(*types.TypeName)
+		if tname == nil {
+			t.Errorf("%s: %v not a type name", label, obj)
+			continue
+		}
+		if !tname.IsAlias() {
+			t.Errorf("%s: %v: not marked as alias", label, tname)
+			continue
+		}
+		if got := tname.Type(); !types.Identical(got, test.typ) {
+			t.Errorf("%s: %v: got %v; want %v", label, tname, got, test.typ)
+		}
+	}
+}
+
+// TestTypeAliases type-checks src (which is deliberately erroneous),
+// round-trips the package through BExportData/BImportData, and verifies
+// that the alias information survives both directions.
+func TestTypeAliases(t *testing.T) {
+	// parse and typecheck
+	fset1 := token.NewFileSet()
+	f, err := parser.ParseFile(fset1, "p.go", src, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var conf types.Config
+	pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
+	if err == nil {
+		// foo is undeclared in src; we should see an error
+		t.Fatal("invalid source type-checked without error")
+	}
+	if pkg1 == nil {
+		// despite incorrect src we should see a (partially) type-checked package
+		t.Fatal("nil package returned")
+	}
+	checkPkg(t, pkg1, "export")
+
+	// export
+	exportdata, err := gcimporter.BExportData(fset1, pkg1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// import
+	imports := make(map[string]*types.Package)
+	fset2 := token.NewFileSet()
+	_, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path())
+	if err != nil {
+		t.Fatalf("BImportData(%s): %v", pkg1.Path(), err)
+	}
+	checkPkg(t, pkg2, "import")
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go
new file mode 100644
index 0000000..e78b78d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go
@@ -0,0 +1,335 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/internal/gcimporter"
+ "golang.org/x/tools/go/loader"
+)
+
+func TestBExportData_stdlib(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("gccgo standard library is inaccessible")
+ }
+ if runtime.GOOS == "android" {
+ t.Skipf("incomplete std lib on %s", runtime.GOOS)
+ }
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{
+ Build: &ctxt,
+ AllowErrors: true,
+ }
+ for _, path := range buildutil.AllPackages(conf.Build) {
+ conf.Import(path)
+ }
+
+ // Create a package containing type and value errors to ensure
+ // they are properly encoded/decoded.
+ f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors
+const UnknownValue = "" + 0
+type UnknownType undefined
+`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf.CreateFromFiles("haserrors", f)
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ numPkgs := len(prog.AllPackages)
+ if want := 248; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ for pkg, info := range prog.AllPackages {
+ if info.Files == nil {
+ continue // empty directory
+ }
+ exportdata, err := gcimporter.BExportData(conf.Fset, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ n, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Errorf("BImportData(%s): %v", pkg.Path(), err)
+ continue
+ }
+ if n != len(exportdata) {
+ t.Errorf("BImportData(%s) decoded %d bytes, want %d",
+ pkg.Path(), n, len(exportdata))
+ }
+
+ // Compare the packages' corresponding members.
+ for _, name := range pkg.Scope().Names() {
+ if !ast.IsExported(name) {
+ continue
+ }
+ obj1 := pkg.Scope().Lookup(name)
+ obj2 := pkg2.Scope().Lookup(name)
+ if obj2 == nil {
+ t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1)
+ continue
+ }
+
+ fl1 := fileLine(conf.Fset, obj1)
+ fl2 := fileLine(fset2, obj2)
+ if fl1 != fl2 {
+ t.Errorf("%s.%s: got posn %s, want %s",
+ pkg.Path(), name, fl2, fl1)
+ }
+
+ if err := equalObj(obj1, obj2); err != nil {
+ t.Errorf("%s.%s: %s\ngot: %s\nwant: %s",
+ pkg.Path(), name, err, obj2, obj1)
+ }
+ }
+ }
+}
+
+func fileLine(fset *token.FileSet, obj types.Object) string {
+ posn := fset.Position(obj.Pos())
+ return fmt.Sprintf("%s:%d", posn.Filename, posn.Line)
+}
+
+// equalObj reports how x and y differ. They are assumed to belong to
+// different universes so cannot be compared directly.
+func equalObj(x, y types.Object) error {
+ if reflect.TypeOf(x) != reflect.TypeOf(y) {
+ return fmt.Errorf("%T vs %T", x, y)
+ }
+ xt := x.Type()
+ yt := y.Type()
+ switch x.(type) {
+ case *types.Var, *types.Func:
+ // ok
+ case *types.Const:
+ xval := x.(*types.Const).Val()
+ yval := y.(*types.Const).Val()
+ // Use string comparison for floating-point values since rounding is permitted.
+ if constant.Compare(xval, token.NEQ, yval) &&
+ !(xval.Kind() == constant.Float && xval.String() == yval.String()) {
+ return fmt.Errorf("unequal constants %s vs %s", xval, yval)
+ }
+ case *types.TypeName:
+ xt = xt.Underlying()
+ yt = yt.Underlying()
+ default:
+ return fmt.Errorf("unexpected %T", x)
+ }
+ return equalType(xt, yt)
+}
+
+func equalType(x, y types.Type) error {
+ if reflect.TypeOf(x) != reflect.TypeOf(y) {
+ return fmt.Errorf("unequal kinds: %T vs %T", x, y)
+ }
+ switch x := x.(type) {
+ case *types.Interface:
+ y := y.(*types.Interface)
+ // TODO(gri): enable separate emission of Embedded interfaces
+ // and ExplicitMethods then use this logic.
+ // if x.NumEmbeddeds() != y.NumEmbeddeds() {
+ // return fmt.Errorf("unequal number of embedded interfaces: %d vs %d",
+ // x.NumEmbeddeds(), y.NumEmbeddeds())
+ // }
+ // for i := 0; i < x.NumEmbeddeds(); i++ {
+ // xi := x.Embedded(i)
+ // yi := y.Embedded(i)
+ // if xi.String() != yi.String() {
+	// 			return fmt.Errorf("mismatched %dth embedded interface: %s vs %s",
+ // i, xi, yi)
+ // }
+ // }
+ // if x.NumExplicitMethods() != y.NumExplicitMethods() {
+ // return fmt.Errorf("unequal methods: %d vs %d",
+ // x.NumExplicitMethods(), y.NumExplicitMethods())
+ // }
+ // for i := 0; i < x.NumExplicitMethods(); i++ {
+ // xm := x.ExplicitMethod(i)
+ // ym := y.ExplicitMethod(i)
+ // if xm.Name() != ym.Name() {
+	// 		return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym)
+ // }
+ // if err := equalType(xm.Type(), ym.Type()); err != nil {
+ // return fmt.Errorf("mismatched %s method: %s", xm.Name(), err)
+ // }
+ // }
+ if x.NumMethods() != y.NumMethods() {
+ return fmt.Errorf("unequal methods: %d vs %d",
+ x.NumMethods(), y.NumMethods())
+ }
+ for i := 0; i < x.NumMethods(); i++ {
+ xm := x.Method(i)
+ ym := y.Method(i)
+ if xm.Name() != ym.Name() {
+ return fmt.Errorf("mismatched %dth method: %s vs %s", i, xm, ym)
+ }
+ if err := equalType(xm.Type(), ym.Type()); err != nil {
+ return fmt.Errorf("mismatched %s method: %s", xm.Name(), err)
+ }
+ }
+ case *types.Array:
+ y := y.(*types.Array)
+ if x.Len() != y.Len() {
+ return fmt.Errorf("unequal array lengths: %d vs %d", x.Len(), y.Len())
+ }
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("array elements: %s", err)
+ }
+ case *types.Basic:
+ y := y.(*types.Basic)
+ if x.Kind() != y.Kind() {
+ return fmt.Errorf("unequal basic types: %s vs %s", x, y)
+ }
+ case *types.Chan:
+ y := y.(*types.Chan)
+ if x.Dir() != y.Dir() {
+ return fmt.Errorf("unequal channel directions: %d vs %d", x.Dir(), y.Dir())
+ }
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("channel elements: %s", err)
+ }
+ case *types.Map:
+ y := y.(*types.Map)
+ if err := equalType(x.Key(), y.Key()); err != nil {
+ return fmt.Errorf("map keys: %s", err)
+ }
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("map values: %s", err)
+ }
+ case *types.Named:
+ y := y.(*types.Named)
+ if x.String() != y.String() {
+ return fmt.Errorf("unequal named types: %s vs %s", x, y)
+ }
+ case *types.Pointer:
+ y := y.(*types.Pointer)
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("pointer elements: %s", err)
+ }
+ case *types.Signature:
+ y := y.(*types.Signature)
+ if err := equalType(x.Params(), y.Params()); err != nil {
+ return fmt.Errorf("parameters: %s", err)
+ }
+ if err := equalType(x.Results(), y.Results()); err != nil {
+ return fmt.Errorf("results: %s", err)
+ }
+ if x.Variadic() != y.Variadic() {
+ return fmt.Errorf("unequal varidicity: %t vs %t",
+ x.Variadic(), y.Variadic())
+ }
+ if (x.Recv() != nil) != (y.Recv() != nil) {
+ return fmt.Errorf("unequal receivers: %s vs %s", x.Recv(), y.Recv())
+ }
+ if x.Recv() != nil {
+ // TODO(adonovan): fix: this assertion fires for interface methods.
+ // The type of the receiver of an interface method is a named type
+ // if the Package was loaded from export data, or an unnamed (interface)
+ // type if the Package was produced by type-checking ASTs.
+ // if err := equalType(x.Recv().Type(), y.Recv().Type()); err != nil {
+ // return fmt.Errorf("receiver: %s", err)
+ // }
+ }
+ case *types.Slice:
+ y := y.(*types.Slice)
+ if err := equalType(x.Elem(), y.Elem()); err != nil {
+ return fmt.Errorf("slice elements: %s", err)
+ }
+ case *types.Struct:
+ y := y.(*types.Struct)
+ if x.NumFields() != y.NumFields() {
+ return fmt.Errorf("unequal struct fields: %d vs %d",
+ x.NumFields(), y.NumFields())
+ }
+ for i := 0; i < x.NumFields(); i++ {
+ xf := x.Field(i)
+ yf := y.Field(i)
+ if xf.Name() != yf.Name() {
+ return fmt.Errorf("mismatched fields: %s vs %s", xf, yf)
+ }
+ if err := equalType(xf.Type(), yf.Type()); err != nil {
+ return fmt.Errorf("struct field %s: %s", xf.Name(), err)
+ }
+ if x.Tag(i) != y.Tag(i) {
+ return fmt.Errorf("struct field %s has unequal tags: %q vs %q",
+ xf.Name(), x.Tag(i), y.Tag(i))
+ }
+ }
+ case *types.Tuple:
+ y := y.(*types.Tuple)
+ if x.Len() != y.Len() {
+ return fmt.Errorf("unequal tuple lengths: %d vs %d", x.Len(), y.Len())
+ }
+ for i := 0; i < x.Len(); i++ {
+ if err := equalType(x.At(i).Type(), y.At(i).Type()); err != nil {
+ return fmt.Errorf("tuple element %d: %s", i, err)
+ }
+ }
+ }
+ return nil
+}
+
+// TestVeryLongFile tests the position of an import object declared in
+// a very long input file. Line numbers greater than maxlines are
+// reported as line 1, not garbage or token.NoPos.
+func TestVeryLongFile(t *testing.T) {
+ // parse and typecheck
+ longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int"
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "foo.go", longFile, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // export
+ exportdata, err := gcimporter.BExportData(fset1, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Fatalf("BImportData(%s): %v", pkg.Path(), err)
+ }
+
+ // compare
+ posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos())
+ posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos())
+ if want := "foo.go:1:1"; posn2.String() != want {
+ t.Errorf("X position = %s, want %s (orig was %s)",
+ posn2, want, posn1)
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go
new file mode 100644
index 0000000..56cdfc0
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go
@@ -0,0 +1,521 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter_test.go,
+// adjusted to make it build with code from (std lib) internal/testenv copied.
+
+package gcimporter
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// The following three functions (Builder, HasGoBuild, MustHaveGoBuild) were
+// copied from $GOROOT/src/internal/testenv since that package is not available
+// in x/tools.
+
+// Builder reports the name of the builder running this test
+// (for example, "linux-amd64" or "windows-386-gce").
+// If the test is not running on the build infrastructure,
+// Builder returns the empty string.
+func Builder() string {
+ return os.Getenv("GO_BUILDER_NAME")
+}
+
+// HasGoBuild reports whether the current system can build programs with ``go build''
+// and then run them with os.StartProcess or exec.Command.
+func HasGoBuild() bool {
+ switch runtime.GOOS {
+ case "android", "nacl":
+ return false
+ case "darwin":
+ if strings.HasPrefix(runtime.GOARCH, "arm") {
+ return false
+ }
+ }
+ return true
+}
+
+// MustHaveGoBuild checks that the current system can build programs with ``go build''
+// and then run them with os.StartProcess or exec.Command.
+// If not, MustHaveGoBuild calls t.Skip with an explanation.
+func MustHaveGoBuild(t *testing.T) {
+ if !HasGoBuild() {
+ t.Skipf("skipping test: 'go build' not available on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+// skipSpecialPlatforms causes the test to be skipped for platforms where
+// builders (build.golang.org) don't have access to compiled packages for
+// import.
+func skipSpecialPlatforms(t *testing.T) {
+ switch platform := runtime.GOOS + "-" + runtime.GOARCH; platform {
+ case "nacl-amd64p32",
+ "nacl-386",
+ "nacl-arm",
+ "darwin-arm",
+ "darwin-arm64":
+ t.Skipf("no compiled packages available for import on %s", platform)
+ }
+}
+
+func compile(t *testing.T, dirname, filename string) string {
+ /* testenv. */ MustHaveGoBuild(t)
+ cmd := exec.Command("go", "tool", "compile", filename)
+ cmd.Dir = dirname
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Logf("%s", out)
+ t.Fatalf("go tool compile %s failed: %s", filename, err)
+ }
+ // filename should end with ".go"
+ return filepath.Join(dirname, filename[:len(filename)-2]+"o")
+}
+
+func testPath(t *testing.T, path, srcDir string) *types.Package {
+ t0 := time.Now()
+ pkg, err := Import(make(map[string]*types.Package), path, srcDir)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return nil
+ }
+ t.Logf("testPath(%s): %v", path, time.Since(t0))
+ return pkg
+}
+
+const maxTime = 30 * time.Second
+
+func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
+ dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
+ list, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ t.Fatalf("testDir(%s): %s", dirname, err)
+ }
+ for _, f := range list {
+ if time.Now().After(endTime) {
+ t.Log("testing time used up")
+ return
+ }
+ switch {
+ case !f.IsDir():
+ // try extensions
+ for _, ext := range pkgExts {
+ if strings.HasSuffix(f.Name(), ext) {
+ name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
+ if testPath(t, filepath.Join(dir, name), dir) != nil {
+ nimports++
+ }
+ }
+ }
+ case f.IsDir():
+ nimports += testDir(t, filepath.Join(dir, f.Name()), endTime)
+ }
+ }
+ return
+}
+
+const testfile = "exports.go"
+
+func TestImportTestdata(t *testing.T) {
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ if outFn := compile(t, "testdata", testfile); outFn != "" {
+ defer os.Remove(outFn)
+ }
+
+ // filename should end with ".go"
+ filename := testfile[:len(testfile)-3]
+ if pkg := testPath(t, "./testdata/"+filename, "."); pkg != nil {
+ // The package's Imports list must include all packages
+ // explicitly imported by testfile, plus all packages
+ // referenced indirectly via exported objects in testfile.
+ // With the textual export format (when run against Go1.6),
+ // the list may also include additional packages that are
+ // not strictly required for import processing alone (they
+ // are exported to err "on the safe side").
+ // For now, we just test the presence of a few packages
+ // that we know are there for sure.
+ got := fmt.Sprint(pkg.Imports())
+ for _, want := range []string{"go/ast", "go/token"} {
+ if !strings.Contains(got, want) {
+ t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
+ }
+ }
+ }
+}
+
+func TestVersionHandling(t *testing.T) {
+ skipSpecialPlatforms(t) // we really only need to exclude nacl platforms, but this is fine
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ const dir = "./testdata/versions"
+ list, err := ioutil.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, f := range list {
+ name := f.Name()
+ if !strings.HasSuffix(name, ".a") {
+ continue // not a package file
+ }
+ if strings.Contains(name, "corrupted") {
+ continue // don't process a leftover corrupted file
+ }
+ pkgpath := "./" + name[:len(name)-2]
+
+ // test that export data can be imported
+ _, err := Import(make(map[string]*types.Package), pkgpath, dir)
+ if err != nil {
+ t.Errorf("import %q failed: %v", pkgpath, err)
+ continue
+ }
+
+ // create file with corrupted export data
+ // 1) read file
+ data, err := ioutil.ReadFile(filepath.Join(dir, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // 2) find export data
+ i := bytes.Index(data, []byte("\n$$B\n")) + 5
+ j := bytes.Index(data[i:], []byte("\n$$\n")) + i
+ if i < 0 || j < 0 || i > j {
+ t.Fatalf("export data section not found (i = %d, j = %d)", i, j)
+ }
+ // 3) corrupt the data (increment every 7th byte)
+ for k := j - 13; k >= i; k -= 7 {
+ data[k]++
+ }
+ // 4) write the file
+ pkgpath += "_corrupted"
+ filename := filepath.Join(dir, pkgpath) + ".a"
+ ioutil.WriteFile(filename, data, 0666)
+ defer os.Remove(filename)
+
+ // test that importing the corrupted file results in an error
+ _, err = Import(make(map[string]*types.Package), pkgpath, dir)
+ if err == nil {
+ t.Errorf("import corrupted %q succeeded", pkgpath)
+ } else if msg := err.Error(); !strings.Contains(msg, "version skew") {
+ t.Errorf("import %q error incorrect (%s)", pkgpath, msg)
+ }
+ }
+}
+
+func TestImportStdLib(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ dt := maxTime
+ if testing.Short() && /* testenv. */ Builder() == "" {
+ dt = 10 * time.Millisecond
+ }
+ nimports := testDir(t, "", time.Now().Add(dt)) // installed packages
+ t.Logf("tested %d imports", nimports)
+}
+
+var importedObjectTests = []struct {
+ name string
+ want string
+}{
+ {"math.Pi", "const Pi untyped float"},
+ {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
+ // Go 1.7 and 1.8 don't know about embedded interfaces. Leave this
+ // test out for now - the code is tested in the std library anyway.
+ // TODO(gri) enable again once we're off 1.7 and 1.8.
+ // {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
+ {"math.Sin", "func Sin(x float64) float64"},
+ // TODO(gri) add more tests
+}
+
+func TestImportedTypes(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ for _, test := range importedObjectTests {
+ s := strings.Split(test.name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := Import(make(map[string]*types.Package), importPath, ".")
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup(objName)
+ if obj == nil {
+ t.Errorf("%s: object not found", test.name)
+ continue
+ }
+
+ got := types.ObjectString(obj, types.RelativeTo(pkg))
+ if got != test.want {
+ t.Errorf("%s: got %q; want %q", test.name, got, test.want)
+ }
+ }
+}
+
+func TestIssue5815(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ pkg := importPkg(t, "strings")
+
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ if obj.Pkg() == nil {
+ t.Errorf("no pkg for %s", obj)
+ }
+ if tname, _ := obj.(*types.TypeName); tname != nil {
+ named := tname.Type().(*types.Named)
+ for i := 0; i < named.NumMethods(); i++ {
+ m := named.Method(i)
+ if m.Pkg() == nil {
+ t.Errorf("no pkg for %s", m)
+ }
+ }
+ }
+ }
+}
+
+// Smoke test to ensure that imported methods get the correct package.
+func TestCorrectMethodPackage(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ imports := make(map[string]*types.Package)
+ _, err := Import(imports, "net/http", ".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type()
+ mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex
+ sel := mset.Lookup(nil, "Lock")
+ lock := sel.Obj().(*types.Func)
+ if got, want := lock.Pkg().Path(), "sync"; got != want {
+ t.Errorf("got package path %q; want %q", got, want)
+ }
+}
+
+func TestIssue13566(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ if f := compile(t, "testdata", "a.go"); f != "" {
+ defer os.Remove(f)
+ }
+ if f := compile(t, "testdata", "b.go"); f != "" {
+ defer os.Remove(f)
+ }
+
+ // import must succeed (test for issue at hand)
+ pkg := importPkg(t, "./testdata/b")
+
+ // make sure all indirectly imported packages have names
+ for _, imp := range pkg.Imports() {
+ if imp.Name() == "" {
+ t.Errorf("no name for %s package", imp.Path())
+ }
+ }
+}
+
+func TestIssue13898(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // import go/internal/gcimporter which imports go/types partially
+ imports := make(map[string]*types.Package)
+ _, err := Import(imports, "go/internal/gcimporter", ".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for go/types package
+ var goTypesPkg *types.Package
+ for path, pkg := range imports {
+ if path == "go/types" {
+ goTypesPkg = pkg
+ break
+ }
+ }
+ if goTypesPkg == nil {
+ t.Fatal("go/types not found")
+ }
+
+ // look for go/types.Object type
+ obj := lookupObj(t, goTypesPkg.Scope(), "Object")
+ typ, ok := obj.Type().(*types.Named)
+ if !ok {
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
+ }
+
+ // lookup go/types.Object.Pkg method
+ m, index, indirect := types.LookupFieldOrMethod(typ, false, nil, "Pkg")
+ if m == nil {
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ }
+
+ // the method must belong to go/types
+ if m.Pkg().Path() != "go/types" {
+ t.Fatalf("found %v; want go/types", m.Pkg())
+ }
+}
+
+func TestIssue15517(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ if f := compile(t, "testdata", "p.go"); f != "" {
+ defer os.Remove(f)
+ }
+
+ // Multiple imports of p must succeed without redeclaration errors.
+ // We use an import path that's not cleaned up so that the eventual
+ // file path for the package is different from the package path; this
+ // will expose the error if it is present.
+ //
+ // (Issue: Both the textual and the binary importer used the file path
+ // of the package to be imported as key into the shared packages map.
+ // However, the binary importer then used the package path to identify
+ // the imported package to mark it as complete; effectively marking the
+ // wrong package as complete. By using an "unclean" package path, the
+ // file and package path are different, exposing the problem if present.
+ // The same issue occurs with vendoring.)
+ imports := make(map[string]*types.Package)
+ for i := 0; i < 3; i++ {
+ if _, err := Import(imports, "./././testdata/p", "."); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIssue15920(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ if f := compile(t, "testdata", "issue15920.go"); f != "" {
+ defer os.Remove(f)
+ }
+
+ importPkg(t, "./testdata/issue15920")
+}
+
+func TestIssue20046(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ if f := compile(t, "testdata", "issue20046.go"); f != "" {
+ defer os.Remove(f)
+ }
+
+ // "./issue20046".V.M must exist
+ pkg := importPkg(t, "./testdata/issue20046")
+ obj := lookupObj(t, pkg.Scope(), "V")
+ if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
+ t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
+ }
+}
+
+func importPkg(t *testing.T, path string) *types.Package {
+ pkg, err := Import(make(map[string]*types.Package), path, ".")
+ if err != nil {
+ t.Fatal(err)
+ }
+ return pkg
+}
+
+func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object {
+ if obj := scope.Lookup(name); obj != nil {
+ return obj
+ }
+ t.Fatalf("%s not found", name)
+ return nil
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/example_test.go b/vendor/golang.org/x/tools/go/types/typeutil/example_test.go
new file mode 100644
index 0000000..86c4d44
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/example_test.go
@@ -0,0 +1,67 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "sort"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+func ExampleMap() {
+ const source = `package P
+
+var X []string
+var Y []string
+
+const p, q = 1.0, 2.0
+
+func f(offset int32) (value byte, ok bool)
+func g(rune) (uint8, bool)
+`
+
+ // Parse and type-check the package.
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "P.go", source, 0)
+ if err != nil {
+ panic(err)
+ }
+ pkg, err := new(types.Config).Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ scope := pkg.Scope()
+
+ // Group names of package-level objects by their type.
+ var namesByType typeutil.Map // value is []string
+ for _, name := range scope.Names() {
+ T := scope.Lookup(name).Type()
+
+ names, _ := namesByType.At(T).([]string)
+ names = append(names, name)
+ namesByType.Set(T, names)
+ }
+
+ // Format, sort, and print the map entries.
+ var lines []string
+ namesByType.Iterate(func(T types.Type, names interface{}) {
+ lines = append(lines, fmt.Sprintf("%s %s", names, T))
+ })
+ sort.Strings(lines)
+ for _, line := range lines {
+ fmt.Println(line)
+ }
+
+ // Output:
+ // [X Y] []string
+ // [f g] func(offset int32) (value byte, ok bool)
+ // [p q] untyped float
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports_test.go b/vendor/golang.org/x/tools/go/types/typeutil/imports_test.go
new file mode 100644
index 0000000..c8ef6d6
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/imports_test.go
@@ -0,0 +1,80 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+type closure map[string]*types.Package
+
+func (c closure) Import(path string) (*types.Package, error) { return c[path], nil }
+
+func TestDependencies(t *testing.T) {
+ packages := make(map[string]*types.Package)
+ conf := types.Config{
+ Importer: closure(packages),
+ }
+ fset := token.NewFileSet()
+
+ // All edges go to the right.
+ // /--D--B--A
+ // F \_C_/
+ // \__E_/
+ for i, content := range []string{
+ `package a`,
+ `package c; import (_ "a")`,
+ `package b; import (_ "a")`,
+ `package e; import (_ "c")`,
+ `package d; import (_ "b"; _ "c")`,
+ `package f; import (_ "d"; _ "e")`,
+ } {
+ f, err := parser.ParseFile(fset, fmt.Sprintf("%d.go", i), content, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ packages[pkg.Path()] = pkg
+ }
+
+ for _, test := range []struct {
+ roots, want string
+ }{
+ {"a", "a"},
+ {"b", "ab"},
+ {"c", "ac"},
+ {"d", "abcd"},
+ {"e", "ace"},
+ {"f", "abcdef"},
+
+ {"be", "abce"},
+ {"eb", "aceb"},
+ {"de", "abcde"},
+ {"ed", "acebd"},
+ {"ef", "acebdf"},
+ } {
+ var pkgs []*types.Package
+ for _, r := range test.roots {
+ pkgs = append(pkgs, packages[string(r)])
+ }
+ var got string
+ for _, p := range typeutil.Dependencies(pkgs...) {
+ got += p.Path()
+ }
+ if got != test.want {
+ t.Errorf("Dependencies(%q) = %q, want %q", test.roots, got, test.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map_test.go b/vendor/golang.org/x/tools/go/types/typeutil/map_test.go
new file mode 100644
index 0000000..34facbe
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map_test.go
@@ -0,0 +1,174 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil_test
+
+// TODO(adonovan):
+// - test use of explicit hasher across two maps.
+// - test hashcodes are consistent with equals for a range of types
+// (e.g. all types generated by type-checking some body of real code).
+
+import (
+ "go/types"
+ "testing"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+var (
+ tStr = types.Typ[types.String] // string
+ tPStr1 = types.NewPointer(tStr) // *string
+ tPStr2 = types.NewPointer(tStr) // *string, again
+ tInt = types.Typ[types.Int] // int
+ tChanInt1 = types.NewChan(types.RecvOnly, tInt) // <-chan int
+ tChanInt2 = types.NewChan(types.RecvOnly, tInt) // <-chan int, again
+)
+
+func checkEqualButNotIdentical(t *testing.T, x, y types.Type, comment string) {
+ if !types.Identical(x, y) {
+ t.Errorf("%s: not equal: %s, %s", comment, x, y)
+ }
+ if x == y {
+ t.Errorf("%s: identical: %v, %v", comment, x, y)
+ }
+}
+
+func TestAxioms(t *testing.T) {
+ checkEqualButNotIdentical(t, tPStr1, tPStr2, "tPstr{1,2}")
+ checkEqualButNotIdentical(t, tChanInt1, tChanInt2, "tChanInt{1,2}")
+}
+
+func TestMap(t *testing.T) {
+ var tmap *typeutil.Map
+
+ // All methods but Set are safe on on (*T)(nil).
+ tmap.Len()
+ tmap.At(tPStr1)
+ tmap.Delete(tPStr1)
+ tmap.KeysString()
+ tmap.String()
+
+ tmap = new(typeutil.Map)
+
+ // Length of empty map.
+ if l := tmap.Len(); l != 0 {
+ t.Errorf("Len() on empty Map: got %d, want 0", l)
+ }
+ // At of missing key.
+ if v := tmap.At(tPStr1); v != nil {
+ t.Errorf("At() on empty Map: got %v, want nil", v)
+ }
+ // Deletion of missing key.
+ if tmap.Delete(tPStr1) {
+ t.Errorf("Delete() on empty Map: got true, want false")
+ }
+ // Set of new key.
+ if prev := tmap.Set(tPStr1, "*string"); prev != nil {
+ t.Errorf("Set() on empty Map returned non-nil previous value %s", prev)
+ }
+
+ // Now: {*string: "*string"}
+
+ // Length of non-empty map.
+ if l := tmap.Len(); l != 1 {
+ t.Errorf("Len(): got %d, want 1", l)
+ }
+ // At via insertion key.
+ if v := tmap.At(tPStr1); v != "*string" {
+ t.Errorf("At(): got %q, want \"*string\"", v)
+ }
+ // At via equal key.
+ if v := tmap.At(tPStr2); v != "*string" {
+ t.Errorf("At(): got %q, want \"*string\"", v)
+ }
+ // Iteration over sole entry.
+ tmap.Iterate(func(key types.Type, value interface{}) {
+ if key != tPStr1 {
+ t.Errorf("Iterate: key: got %s, want %s", key, tPStr1)
+ }
+ if want := "*string"; value != want {
+ t.Errorf("Iterate: value: got %s, want %s", value, want)
+ }
+ })
+
+ // Setion with key equal to present one.
+ if prev := tmap.Set(tPStr2, "*string again"); prev != "*string" {
+ t.Errorf("Set() previous value: got %s, want \"*string\"", prev)
+ }
+
+ // Setion of another association.
+ if prev := tmap.Set(tChanInt1, "<-chan int"); prev != nil {
+ t.Errorf("Set() previous value: got %s, want nil", prev)
+ }
+
+ // Now: {*string: "*string again", <-chan int: "<-chan int"}
+
+ want1 := "{*string: \"*string again\", <-chan int: \"<-chan int\"}"
+ want2 := "{<-chan int: \"<-chan int\", *string: \"*string again\"}"
+ if s := tmap.String(); s != want1 && s != want2 {
+ t.Errorf("String(): got %s, want %s", s, want1)
+ }
+
+ want1 = "{*string, <-chan int}"
+ want2 = "{<-chan int, *string}"
+ if s := tmap.KeysString(); s != want1 && s != want2 {
+ t.Errorf("KeysString(): got %s, want %s", s, want1)
+ }
+
+ // Keys().
+ I := types.Identical
+ switch k := tmap.Keys(); {
+ case I(k[0], tChanInt1) && I(k[1], tPStr1): // ok
+ case I(k[1], tChanInt1) && I(k[0], tPStr1): // ok
+ default:
+ t.Errorf("Keys(): got %v, want %s", k, want2)
+ }
+
+ if l := tmap.Len(); l != 2 {
+ t.Errorf("Len(): got %d, want 1", l)
+ }
+ // At via original key.
+ if v := tmap.At(tPStr1); v != "*string again" {
+ t.Errorf("At(): got %q, want \"*string again\"", v)
+ }
+ hamming := 1
+ tmap.Iterate(func(key types.Type, value interface{}) {
+ switch {
+ case I(key, tChanInt1):
+ hamming *= 2 // ok
+ case I(key, tPStr1):
+ hamming *= 3 // ok
+ }
+ })
+ if hamming != 6 {
+ t.Errorf("Iterate: hamming: got %d, want %d", hamming, 6)
+ }
+
+ if v := tmap.At(tChanInt2); v != "<-chan int" {
+ t.Errorf("At(): got %q, want \"<-chan int\"", v)
+ }
+ // Deletion with key equal to present one.
+ if !tmap.Delete(tChanInt2) {
+ t.Errorf("Delete() of existing key: got false, want true")
+ }
+
+ // Now: {*string: "*string again"}
+
+ if l := tmap.Len(); l != 1 {
+ t.Errorf("Len(): got %d, want 1", l)
+ }
+ // Deletion again.
+ if !tmap.Delete(tPStr2) {
+ t.Errorf("Delete() of existing key: got false, want true")
+ }
+
+ // Now: {}
+
+ if l := tmap.Len(); l != 0 {
+ t.Errorf("Len(): got %d, want %d", l, 0)
+ }
+ if s := tmap.String(); s != "{}" {
+ t.Errorf("Len(): got %q, want %q", s, "")
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui_test.go b/vendor/golang.org/x/tools/go/types/typeutil/ui_test.go
new file mode 100644
index 0000000..b5064ac
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui_test.go
@@ -0,0 +1,61 @@
+package typeutil_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+func TestIntuitiveMethodSet(t *testing.T) {
+ const source = `
+package P
+type A int
+func (A) f()
+func (*A) g()
+`
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "hello.go", source, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf types.Config
+ pkg, err := conf.Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ qual := types.RelativeTo(pkg)
+
+ for _, test := range []struct {
+ expr string // type expression
+ want string // intuitive method set
+ }{
+ {"A", "(A).f (*A).g"},
+ {"*A", "(*A).f (*A).g"},
+ {"error", "(error).Error"},
+ {"*error", ""},
+ {"struct{A}", "(struct{A}).f (*struct{A}).g"},
+ {"*struct{A}", "(*struct{A}).f (*struct{A}).g"},
+ } {
+ tv, err := types.Eval(fset, pkg, 0, test.expr)
+ if err != nil {
+ t.Errorf("Eval(%s) failed: %v", test.expr, err)
+ }
+ var names []string
+ for _, m := range typeutil.IntuitiveMethodSet(tv.Type, nil) {
+ name := fmt.Sprintf("(%s).%s", types.TypeString(m.Recv(), qual), m.Obj().Name())
+ names = append(names, name)
+ }
+ got := strings.Join(names, " ")
+ if got != test.want {
+ t.Errorf("IntuitiveMethodSet(%s) = %q, want %q", test.expr, got, test.want)
+ }
+ }
+}
From 9c853cdc55b70ec6b392f85f5da7986178810793 Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Sat, 13 Jul 2019 12:19:57 -0400
Subject: [PATCH 5/8] Make the wasm version work with the latest changes
---
.gitignore | 6 +-
.travis.yml | 2 +-
Gopkg.lock | 47 +-
Makefile | 5 +-
README.md | 4 +-
.../karma-gopher.conf.js | 0
karma-wasm.conf.js => js/karma-wasm.conf.js | 5 +-
js/karma.conf.js | 49 -
js/tests/cipher-wasm-internal.js | 62 +
js/tests/cipher-wasm-internal.spec.ts | 66 -
js/tests/cipher-wasm.spec.ts | 6 +-
.../skycoin/src/cipher/address_test.go | 167 +-
.../skycoin/src/cipher/base58/base58_test.go | 1199 ++++++++++
.../skycoin/src/cipher/bip39/bip39_test.go | 553 +++++
.../skycoin/src/cipher/bip39/example_test.go | 37 +
.../skycoin/src/cipher/bitcoin_test.go | 376 +++
.../skycoin/skycoin/src/cipher/crypto_test.go | 689 ++++--
.../src/cipher/encoder/benchmark_test.go | 33 +
.../src/cipher/encoder/encoder_test.go | 1247 +++++++---
.../skycoin/skycoin/src/cipher/hash_test.go | 262 ++-
.../skycoin/src/cipher/pbkdf2/pbkdf2_test.go | 157 ++
.../src/cipher/secp256k1-go/secp256_test.go | 908 ++++---
.../secp256k1-go/secp256k1-go2/ec_test.go | 7 +-
.../secp256k1-go/secp256k1-go2/field_test.go | 10 +-
.../secp256k1-go/secp256k1-go2/sig_test.go | 297 ++-
.../secp256k1-go/secp256k1-go2/xyz_test.go | 2 +-
.../src/coin/block_body_skyencoder_test.go | 421 ++++
.../src/coin/block_header_skyencoder_test.go | 421 ++++
.../skycoin/skycoin/src/coin/block_test.go | 141 +-
.../skycoin/skycoin/src/coin/coin_test.go | 299 ---
.../transaction_inputs_skyencoder_test.go | 421 ++++
.../transaction_outputs_skyencoder_test.go | 421 ++++
.../src/coin/transaction_skyencoder_test.go | 421 ++++
.../skycoin/src/coin/transactions_test.go | 742 +++---
.../src/coin/ux_body_skyencoder_test.go | 421 ++++
.../src/coin/ux_head_skyencoder_test.go | 421 ++++
.../src/util/mathutil/mathutil_64bit_test.go | 40 +
.../mathutil/mathutil_test.go} | 47 +-
vendor/github.com/spf13/cobra/args_test.go | 46 +
.../spf13/cobra/bash_completions_test.go | 26 +-
vendor/github.com/spf13/cobra/command_test.go | 66 +-
.../cobra/powershell_completions_test.go | 122 +
.../spf13/cobra/zsh_completions_test.go | 472 +++-
vendor/github.com/spf13/pflag/bytes_test.go | 64 +-
vendor/github.com/spf13/pflag/flag_test.go | 5 +
.../spf13/pflag/string_to_int_test.go | 156 ++
.../spf13/pflag/string_to_string_test.go | 162 ++
.../x/crypto/ssh/terminal/terminal_test.go | 51 +-
vendor/golang.org/x/sys/unix/darwin_test.go | 210 ++
vendor/golang.org/x/sys/unix/dirent_test.go | 150 ++
.../{example_test.go => example_exec_test.go} | 2 +-
.../x/sys/unix/example_flock_test.go | 25 +
vendor/golang.org/x/sys/unix/export_test.go | 2 +-
.../x/sys/unix/getdirentries_test.go | 83 +
.../golang.org/x/sys/unix/mmap_unix_test.go | 12 +-
vendor/golang.org/x/sys/unix/openbsd_test.go | 2 +-
vendor/golang.org/x/sys/unix/sendfile_test.go | 98 +
.../golang.org/x/sys/unix/syscall_aix_test.go | 168 ++
.../golang.org/x/sys/unix/syscall_bsd_test.go | 8 +-
.../x/sys/unix/syscall_darwin_test.go | 53 +
.../x/sys/unix/syscall_linux_test.go | 237 +-
.../x/sys/unix/syscall_netbsd_test.go | 51 +
.../x/sys/unix/syscall_openbsd_test.go | 58 +
vendor/golang.org/x/sys/unix/syscall_test.go | 16 +-
.../x/sys/unix/syscall_unix_test.go | 135 +-
.../golang.org/x/sys/unix/timestruct_test.go | 2 +-
vendor/golang.org/x/sys/unix/xattr_test.go | 92 +-
.../golang.org/x/sys/windows/syscall_test.go | 11 +
.../x/sys/windows/syscall_windows_test.go | 162 +-
.../x/tools/go/ast/astutil/enclosing_test.go | 195 ++
.../x/tools/go/ast/astutil/imports_test.go | 2087 +++++++++++++++++
.../x/tools/go/ast/astutil/rewrite_test.go | 248 ++
.../x/tools/go/buildutil/allpackages_test.go | 20 +-
.../x/tools/go/buildutil/util_test.go | 33 +-
.../x/tools/go/gcexportdata/example_test.go | 32 +-
.../go/internal/gcimporter/bexport19_test.go | 96 -
.../go/internal/gcimporter/bexport_test.go | 84 +
.../internal/gcimporter/gcimporter11_test.go | 129 +
.../go/internal/gcimporter/gcimporter_test.go | 170 +-
.../go/internal/gcimporter/iexport_test.go | 308 +++
.../go/internal/gcimporter/israce_test.go | 11 +
.../x/tools/go/types/typeutil/callee_test.go | 89 +
wasm/skycoin.go | 18 +-
83 files changed, 14530 insertions(+), 2149 deletions(-)
rename karma-gopher.conf.js => js/karma-gopher.conf.js (100%)
rename karma-wasm.conf.js => js/karma-wasm.conf.js (80%)
delete mode 100644 js/karma.conf.js
create mode 100644 js/tests/cipher-wasm-internal.js
delete mode 100644 js/tests/cipher-wasm-internal.spec.ts
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/bip39/bip39_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/bip39/example_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/bitcoin_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/encoder/benchmark_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/cipher/pbkdf2/pbkdf2_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/block_body_skyencoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/block_header_skyencoder_test.go
delete mode 100644 vendor/github.com/skycoin/skycoin/src/coin/coin_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/transaction_inputs_skyencoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/transaction_outputs_skyencoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/transaction_skyencoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/ux_body_skyencoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/coin/ux_head_skyencoder_test.go
create mode 100644 vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_64bit_test.go
rename vendor/github.com/skycoin/skycoin/src/{coin/math_test.go => util/mathutil/mathutil_test.go} (70%)
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions_test.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int_test.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_string_test.go
create mode 100644 vendor/golang.org/x/sys/unix/darwin_test.go
create mode 100644 vendor/golang.org/x/sys/unix/dirent_test.go
rename vendor/golang.org/x/sys/unix/{example_test.go => example_exec_test.go} (83%)
create mode 100644 vendor/golang.org/x/sys/unix/example_flock_test.go
create mode 100644 vendor/golang.org/x/sys/unix/getdirentries_test.go
create mode 100644 vendor/golang.org/x/sys/unix/sendfile_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_test.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_test.go
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports_test.go
create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go
delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter11_test.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport_test.go
create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/israce_test.go
create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee_test.go
diff --git a/.gitignore b/.gitignore
index dce6f0e..9d8c091 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,13 @@ gopherjs-tool.exe
gopherjs-tool
skycoin.js
skycoin.js.map
+test.wasm
# dependencies
/node_modules
+/js/node_modules
+/js/package-lock.json
# temp files
-/coverage
\ No newline at end of file
+/coverage
+/js/coverage
diff --git a/.travis.yml b/.travis.yml
index ad0ad42..6867e1b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,7 +8,7 @@ go:
install:
- if [[ ! -d $GOPATH/src/github.com/skycoin/skycoin-lite ]]; then mkdir -p $GOPATH/src/github.com/skycoin; ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/skycoin/skycoin-lite; fi
- cd $GOPATH/src/github.com/skycoin/skycoin-lite
- - go get -t ./...
+ - GOOS=js GOARCH=wasm go get -t ./...
- go get -u github.com/FiloSottile/vendorcheck
# Install pinned golangci-lint, overriding the latest version install by make install-linters
- VERSION=1.17.1 ./ci-scripts/install-golangci-lint.sh
diff --git a/Gopkg.lock b/Gopkg.lock
index f2bb591..fb20ff6 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,16 +2,13 @@
[[projects]]
- digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
- pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
branch = "master"
- digest = "1:b32c5e3359a78839c699d755633c1e15815842826ba22109dc529cc89c90604b"
name = "github.com/gopherjs/gopherjs"
packages = [
".",
@@ -25,60 +22,48 @@
"compiler/prelude",
"compiler/typesutil",
"internal/sysutil",
- "js",
+ "js"
]
- pruneopts = "UT"
revision = "3e4dfb77656c424b6d1196a4d5fed0fcf63677cc"
[[projects]]
- digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
- pruneopts = "UT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
- digest = "1:15ec2166e33ef6c60b344a04d050eec79193517e7f5082b6233b2d09ef0d10b8"
name = "github.com/kisielk/gotool"
packages = [
".",
- "internal/load",
+ "internal/load"
]
- pruneopts = "UT"
revision = "80517062f582ea3340cd4baf70e86d539ae7d84d"
version = "v1.0.0"
[[projects]]
branch = "master"
- digest = "1:29c4cf076d5389b62cbec36c471f967ef0d1a0d1bbe0d7b35ccfbcddb9b67965"
name = "github.com/neelance/astrewrite"
packages = ["."]
- pruneopts = "UT"
revision = "99348263ae862cc230986ce88deaddbf7edcc034"
[[projects]]
branch = "master"
- digest = "1:6189e41c1f8ef7e827a98b387f2d548a81ae1961a1042b76f9bf746e40f8ac92"
name = "github.com/neelance/sourcemap"
packages = ["."]
- pruneopts = "UT"
revision = "8c68805598ab8d5637b1a72b5f7d381ea0f39c31"
[[projects]]
branch = "master"
- digest = "1:1e8a61d7b130a14881417800c56cd669a063890f7b0191937099d8ca990b29fa"
name = "github.com/shurcooL/httpfs"
packages = [
"filter",
- "vfsutil",
+ "vfsutil"
]
- pruneopts = "UT"
revision = "8d4bc4ba774931155e6cd5ef6098cb038dd45135"
[[projects]]
branch = "develop"
- digest = "1:9afd6145eeb6b534fdbaf5a9b8db5186d3edec6804c8c53c71f0b13af5e3cc88"
name = "github.com/skycoin/skycoin"
packages = [
"src/cipher",
@@ -91,70 +76,52 @@
"src/cipher/secp256k1-go",
"src/cipher/secp256k1-go/secp256k1-go2",
"src/coin",
- "src/util/mathutil",
+ "src/util/mathutil"
]
- pruneopts = "UT"
revision = "210a51f06749d6edcec4237d2488b614085cfcba"
[[projects]]
- digest = "1:e096613fb7cf34743d49af87d197663cfccd61876e2219853005a57baedfa562"
name = "github.com/spf13/cobra"
packages = ["."]
- pruneopts = "UT"
revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5"
version = "v0.0.5"
[[projects]]
- digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
- pruneopts = "UT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
branch = "master"
- digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
- pruneopts = "UT"
revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
[[projects]]
branch = "master"
- digest = "1:47a52ed81842d647114b9210719bb6d8e1595094a6cb23e70123bb1f109f3c80"
name = "golang.org/x/sys"
packages = [
"unix",
- "windows",
+ "windows"
]
- pruneopts = "UT"
revision = "6ec70d6a5542cba804c6d16ebe8392601a0b7b60"
[[projects]]
branch = "master"
- digest = "1:bbc4df114ba39b7578b420b423cee3e7d0d3f5a0bf67b47749a9efd83e799c19"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"go/buildutil",
"go/gcexportdata",
"go/internal/gcimporter",
- "go/types/typeutil",
+ "go/types/typeutil"
]
- pruneopts = "UT"
revision = "9a621aea19f8341c01da59e0d42dd97700f677d0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- input-imports = [
- "github.com/gopherjs/gopherjs",
- "github.com/gopherjs/gopherjs/js",
- "github.com/skycoin/skycoin/src/cipher",
- "github.com/skycoin/skycoin/src/cipher/bip39",
- "github.com/skycoin/skycoin/src/cipher/secp256k1-go",
- "github.com/skycoin/skycoin/src/coin",
- ]
+ inputs-digest = "5c40980efb4f3f523fd74f51d7e55c9b72280a110c63004b3f128618740eea84"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/Makefile b/Makefile
index 33e4d15..0052176 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@ build-js-min: ## Build /skycoin/skycoin.go. The result is minified and saved in
GOOS=linux ./gopherjs-tool build skycoin/skycoin.go -m -o js/skycoin.js
build-wasm: ## Build /wasm/skycoin.go. The result is saved in the repo root as skycoin-lite.wasm
- GOOS=js GOARCH=wasm go build -o skycoin-lite.wasm ./wasm/skycoin.go
+ GOOS=js GOARCH=wasm go build -o js/skycoin-lite.wasm ./wasm/skycoin.go
test-js: ## Run the Go tests using JavaScript
go build -o gopherjs-tool vendor/github.com/gopherjs/gopherjs/tool.go
@@ -27,10 +27,11 @@ test-suite-ts: ## Run the ts version of the cipher test suite for GopherJS. Use
test-suite-ts-extensive: ## Run the ts version of the cipher test suite for GopherJS. All the test cases
cd js && npm run test-extensive
-test-suite-ts-wasm: ## Run the ts version of the cipher test suite for wasm
+test-suite-ts-wasm: ## Run the ts version of the cipher test suite for wasm and additional tests
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go && GOOS=js GOARCH=wasm go test -c -o test.wasm
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 && GOOS=js GOARCH=wasm go test -c -o test.wasm
cd js && npm run test-wasm
+ cd js/tests && node cipher-wasm-internal.js
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go && rm test.wasm
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 && rm test.wasm
diff --git a/README.md b/README.md
index 470649c..e67a7c9 100644
--- a/README.md
+++ b/README.md
@@ -38,14 +38,14 @@ performance issues and should not be used. Please compile to a wasm file instead
For the compilation process to javascript library, we use [gopherjs](https://github.com/gopherjs/gopherjs).
To compile the library use `make build-js` or `make build-js-min` (if you want the final file to be minified).
-After compiling, `the main.js` and `main.js.map` files will be created/updated in the root of the repository.
+After compiling, the `skycoin.js` and `skycoin.js.map` files will be created/updated in the `js` folder.
## Compile wasm file
> IMPORTANT: you need Go v1.12.x to use this function. It is not guaranteed to work with Go v1.13+.
To compile the wasm file use `make build-wasm`. After compiling, the `skycoin-lite.wasm` file will be
-created/updated in the root of the repository.
+created/updated in the `js` folder.
## Development
diff --git a/karma-gopher.conf.js b/js/karma-gopher.conf.js
similarity index 100%
rename from karma-gopher.conf.js
rename to js/karma-gopher.conf.js
diff --git a/karma-wasm.conf.js b/js/karma-wasm.conf.js
similarity index 80%
rename from karma-wasm.conf.js
rename to js/karma-wasm.conf.js
index 0820dcd..4410527 100644
--- a/karma-wasm.conf.js
+++ b/js/karma-wasm.conf.js
@@ -15,11 +15,10 @@ module.exports = function (config) {
],
files: [
'tests/cipher-wasm.spec.ts',
- 'tests/cipher-wasm-internal.spec.ts',
{ pattern: 'tests/test-fixtures/*.golden', included: false },
{ pattern: 'skycoin-lite.wasm', included: false },
- { pattern: 'vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm', included: false },
- { pattern: 'vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm', included: false },
+ { pattern: 'test1.wasm', included: false },
+ { pattern: 'test2.wasm', included: false },
{ pattern: 'tests/utils.ts', included: true },
{ pattern: 'tests/wasm_exec.js', included: true },
],
diff --git a/js/karma.conf.js b/js/karma.conf.js
deleted file mode 100644
index ad68559..0000000
--- a/js/karma.conf.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Karma configuration file, see link for more information
-// https://karma-runner.github.io/0.13/config/configuration-file.html
-
-module.exports = function (config) {
-
- var cipherParamIndex = process.argv.indexOf('--mode');
- // check if command line has cipher parameter with not empty value
- if (cipherParamIndex > -1 && (cipherParamIndex + 1) < process.argv.length && process.argv[cipherParamIndex + 1]) {
- var cipherMode = process.argv[cipherParamIndex + 1];
- }
-
- config.set({
- basePath: '',
- frameworks: ['jasmine', 'karma-typescript'],
- plugins: [
- require('karma-jasmine'),
- require('karma-chrome-launcher'),
- require('karma-jasmine-html-reporter'),
- require('karma-read-json'),
- require('karma-typescript')
- ],
- files: [
- 'tests/*.spec.ts',
- { pattern: 'tests/test-fixtures/*.golden', included: false },
- { pattern: 'tests/*.ts', included: true },
- { pattern: 'skycoin.js', included: true }
- ],
- preprocessors: {
- "**/*.ts": "karma-typescript"
- },
- client: {
- clearContext: false // leave Jasmine Spec Runner output visible in browser
- },
- reporters: ['progress', 'kjhtml', 'karma-typescript'],
- karmaTypescriptConfig: {
- bundlerOptions: {
- constants: {
- "TESTING_MODE": cipherMode
- }
- }
- },
- port: 9876,
- colors: true,
- logLevel: config.LOG_INFO,
- autoWatch: true,
- browsers: ['ChromeHeadless', 'Chrome'],
- singleRun: false
- });
-};
diff --git a/js/tests/cipher-wasm-internal.js b/js/tests/cipher-wasm-internal.js
new file mode 100644
index 0000000..dd25954
--- /dev/null
+++ b/js/tests/cipher-wasm-internal.js
@@ -0,0 +1,62 @@
+// Runs the tests from src/cipher/secp256k1-go/ and src/cipher/secp256k1-go/secp256k1-go2/.
+// The tests must already be compiled to wasm, so this script is meant to be invoked via "make test-suite-ts-wasm"
+
+// Required for wasm_exec to work correctly in Node.js.
+const util = require('util');
+TextEncoder = util.TextEncoder;
+TextDecoder = util.TextDecoder
+
+require('./wasm_exec');
+const fs = require('fs');
+
+// Required for wasm_exec to work correctly in Node.js.
+performance = {
+ now() {
+ const [sec, nsec] = process.hrtime();
+ return sec * 1000 + nsec / 1000000;
+ },
+};
+
+// Required for wasm_exec to work correctly in Node.js.
+const nodeCrypto = require("crypto");
+crypto = {
+ getRandomValues(b) {
+ nodeCrypto.randomFillSync(b);
+ },
+};
+
+// wasm_exec uses console.warn in case of error, so this code uses it to detect when a test fails.
+const tmp = console.warn;
+console.warn = (message, ...optionalParams) => {
+ tmp(message, optionalParams);
+  // If a test fails, the process exits with an error code.
+ process.exit(1);
+};
+
+runTest1 = function() {
+ const testFile = fs.readFileSync('../../vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm', null).buffer;
+ const go = new global.Go();
+ WebAssembly.instantiate(testFile, go.importObject).then(result => {
+ go.run(result.instance).then(() => {
+ runTest2();
+ }, err => {
+ console.log(err);
+ process.exit(1);
+ });
+ });
+}
+
+runTest2 = function() {
+ const testFile = fs.readFileSync('../../vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm', null).buffer;
+ const go = new global.Go();
+ WebAssembly.instantiate(testFile, go.importObject).then(result => {
+ go.run(result.instance).then(() => {
+
+ }, err => {
+ console.log(err);
+ process.exit(1);
+ });
+ });
+}
+
+runTest1();
diff --git a/js/tests/cipher-wasm-internal.spec.ts b/js/tests/cipher-wasm-internal.spec.ts
deleted file mode 100644
index 3b97e87..0000000
--- a/js/tests/cipher-wasm-internal.spec.ts
+++ /dev/null
@@ -1,66 +0,0 @@
-// Runs the tests from src/cipher/sec256k1-go/ and src/cipher/sec256k1-go/secp256k1-go2/
-// after compiled to wasm
-
-declare var Go: any;
-
-describe('Tnternal test ', () => {
-
- let warningShown = false;
-
- const tmp = console.warn;
- console.warn = (message, ...optionalParams) => {
- warningShown = true;
- tmp(message, optionalParams);
- };
-
- let originalTimeout;
-
- beforeEach(function() {
- originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
- jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000;
- });
-
- afterEach(function() {
- jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
- });
-
- it('test from src/cipher/sec256k1-go/ should pass', done => {
- warningShown = false;
- fetch('base/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm').then(response => {
- response.arrayBuffer().then(ab => {
- const go = new Go();
- window['WebAssembly'].instantiate(ab, go.importObject).then(result => {
- go.run(result.instance).then(result => {
- if (warningShown == false) {
- done();
- } else {
- fail('Test failed.');
- }
- }, err => {
- fail('Test failed.');
- });
- });
- });
- });
- });
-
- it('test from src/cipher/sec256k1-go/secp256k1-go2/ should pass', done => {
- warningShown = false;
- fetch('base/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm').then(response => {
- response.arrayBuffer().then(ab => {
- const go = new Go();
- window['WebAssembly'].instantiate(ab, go.importObject).then(result => {
- go.run(result.instance).then(result => {
- if (warningShown == false) {
- done();
- } else {
- fail('Test failed.');
- }
- }, err => {
- fail('Test failed.');
- });
- });
- });
- });
- });
-});
diff --git a/js/tests/cipher-wasm.spec.ts b/js/tests/cipher-wasm.spec.ts
index e2dead3..45b5f12 100644
--- a/js/tests/cipher-wasm.spec.ts
+++ b/js/tests/cipher-wasm.spec.ts
@@ -105,7 +105,7 @@ describe('CipherProvider Lib', () => {
it(`should verify signature correctly`, done => {
testData.forEach(data => {
- const result = window['SkycoinCipherExtras'].verifySignature(data.public_key, data.signature, data.hash);
+ const result = window['SkycoinCipherExtras'].verifyPubKeySignedHash(data.public_key, data.signature, data.hash);
expect(result).toBeNull();
done();
});
@@ -113,7 +113,7 @@ describe('CipherProvider Lib', () => {
it(`should check signature correctly`, done => {
testData.forEach(data => {
- const result = window['SkycoinCipherExtras'].chkSig(data.address, data.hash, data.signature);
+ const result = window['SkycoinCipherExtras'].verifyAddressSignedHash(data.address, data.signature, data.hash);
expect(result).toBeNull();
done();
});
@@ -121,7 +121,7 @@ describe('CipherProvider Lib', () => {
it(`should verify signed hash correctly`, done => {
testData.forEach(data => {
- const result = window['SkycoinCipherExtras'].verifySignedHash(data.signature, data.hash);
+ const result = window['SkycoinCipherExtras'].verifySignatureRecoverPubKey(data.signature, data.hash);
expect(result).toBeNull();
done();
});
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go
index 29454cb..b2ed548 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/address_test.go
@@ -1,6 +1,7 @@
package cipher
import (
+ "errors"
"testing"
"github.com/stretchr/testify/require"
@@ -16,9 +17,9 @@ func TestMustDecodeBase58Address(t *testing.T) {
require.Panics(t, func() { MustDecodeBase58Address("") })
require.Panics(t, func() { MustDecodeBase58Address("cascs") })
b := a.Bytes()
- h := string(base58.Hex2Base58(b[:len(b)/2]))
+ h := string(base58.Encode(b[:len(b)/2]))
require.Panics(t, func() { MustDecodeBase58Address(h) })
- h = string(base58.Hex2Base58(b))
+ h = string(base58.Encode(b))
require.NotPanics(t, func() { MustDecodeBase58Address(h) })
a2 := MustDecodeBase58Address(h)
require.Equal(t, a, a2)
@@ -42,6 +43,9 @@ func TestMustDecodeBase58Address(t *testing.T) {
// trailing zeroes are invalid
badAddr = a.String() + "000"
require.Panics(t, func() { MustDecodeBase58Address(badAddr) })
+
+ null := "1111111111111111111111111"
+ require.Panics(t, func() { MustDecodeBase58Address(null) })
}
func TestDecodeBase58Address(t *testing.T) {
@@ -49,16 +53,19 @@ func TestDecodeBase58Address(t *testing.T) {
a := AddressFromPubKey(p)
require.NoError(t, a.Verify(p))
- a2, err := DecodeBase58Address("")
+ _, err := DecodeBase58Address("")
require.Error(t, err)
- a2, err = DecodeBase58Address("cascs")
+
+ _, err = DecodeBase58Address("cascs")
require.Error(t, err)
+
b := a.Bytes()
- h := string(base58.Hex2Base58(b[:len(b)/2]))
- a2, err = DecodeBase58Address(h)
+ h := string(base58.Encode(b[:len(b)/2]))
+ _, err = DecodeBase58Address(h)
require.Error(t, err)
- h = string(base58.Hex2Base58(b))
- a2, err = DecodeBase58Address(h)
+
+ h = string(base58.Encode(b))
+ a2, err := DecodeBase58Address(h)
require.NoError(t, err)
require.Equal(t, a, a2)
@@ -86,6 +93,12 @@ func TestDecodeBase58Address(t *testing.T) {
as2 = as + "000"
_, err = DecodeBase58Address(as2)
require.Error(t, err)
+
+ // null address is invalid
+ null := "1111111111111111111111111"
+ _, err = DecodeBase58Address(null)
+ require.Error(t, err)
+ require.Equal(t, ErrAddressInvalidChecksum, err)
}
func TestAddressFromBytes(t *testing.T) {
@@ -108,30 +121,32 @@ func TestAddressFromBytes(t *testing.T) {
a.Version = 2
b = a.Bytes()
_, err = AddressFromBytes(b)
- require.EqualError(t, err, "Invalid version")
+ require.EqualError(t, err, "Address version invalid")
}
-func TestBitcoinAddressFromBytes(t *testing.T) {
+func TestMustAddressFromBytes(t *testing.T) {
p, _ := GenerateKeyPair()
a := AddressFromPubKey(p)
- a2, err := BitcoinAddressFromBytes(a.BitcoinBytes())
- require.NoError(t, err)
+ a2 := MustAddressFromBytes(a.Bytes())
require.Equal(t, a2, a)
// Invalid number of bytes
- b := a.BitcoinBytes()
- _, err = BitcoinAddressFromBytes(b[:len(b)-2])
- require.EqualError(t, err, "Invalid address length")
+ b := a.Bytes()
+ require.Panics(t, func() {
+ MustAddressFromBytes(b[:len(b)-2])
+ })
// Invalid checksum
b[len(b)-1] += byte(1)
- _, err = BitcoinAddressFromBytes(b)
- require.EqualError(t, err, "Invalid checksum")
+ require.Panics(t, func() {
+ MustAddressFromBytes(b)
+ })
a.Version = 2
- b = a.BitcoinBytes()
- _, err = BitcoinAddressFromBytes(b)
- require.EqualError(t, err, "Invalid version")
+ b = a.Bytes()
+ require.Panics(t, func() {
+ MustAddressFromBytes(b)
+ })
}
func TestAddressRoundtrip(t *testing.T) {
@@ -171,95 +186,10 @@ func TestAddressString(t *testing.T) {
require.Equal(t, a2, a3)
}
-func TestBitcoinAddress1(t *testing.T) {
- seckey := MustSecKeyFromHex("1111111111111111111111111111111111111111111111111111111111111111")
- pubkey := PubKeyFromSecKey(seckey)
- pubkeyStr := "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
- require.Equal(t, pubkeyStr, pubkey.Hex())
- bitcoinStr := "1Q1pE5vPGEEMqRcVRMbtBK842Y6Pzo6nK9"
- bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
- require.Equal(t, bitcoinStr, bitcoinAddr)
-}
-
-func TestBitcoinAddress2(t *testing.T) {
- seckey := MustSecKeyFromHex("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
- pubkey := PubKeyFromSecKey(seckey)
- pubkeyStr := "02ed83704c95d829046f1ac27806211132102c34e9ac7ffa1b71110658e5b9d1bd"
- require.Equal(t, pubkeyStr, pubkey.Hex())
- bitcoinStr := "1NKRhS7iYUGTaAfaR5z8BueAJesqaTyc4a"
- bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
- require.Equal(t, bitcoinStr, bitcoinAddr)
-}
-
-func TestBitcoinAddress3(t *testing.T) {
- seckey := MustSecKeyFromHex("47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012")
- pubkey := PubKeyFromSecKey(seckey)
- pubkeyStr := "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3"
- require.Equal(t, pubkeyStr, pubkey.Hex())
- bitcoinStr := "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV"
- bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
- require.Equal(t, bitcoinStr, bitcoinAddr)
-}
-
-func TestBitcoinWIPRoundTrio(t *testing.T) {
-
- _, seckey1 := GenerateKeyPair()
- wip1 := BitcoinWalletImportFormatFromSeckey(seckey1)
- seckey2, err := SecKeyFromWalletImportFormat(wip1)
- wip2 := BitcoinWalletImportFormatFromSeckey(seckey2)
-
- require.NoError(t, err)
- require.Equal(t, seckey1, seckey2)
- require.Equal(t, seckey1.Hex(), seckey2.Hex())
- require.Equal(t, wip1, wip2)
-
-}
-
-func TestBitcoinWIP(t *testing.T) {
- //wallet input format string
- var wip = []string{
- "KwntMbt59tTsj8xqpqYqRRWufyjGunvhSyeMo3NTYpFYzZbXJ5Hp",
- "L4ezQvyC6QoBhxB4GVs9fAPhUKtbaXYUn8YTqoeXwbevQq4U92vN",
- "KydbzBtk6uc7M6dXwEgTEH2sphZxSPbmDSz6kUUHi4eUpSQuhEbq",
- }
- //the expected pubkey to generate
- var pub = []string{
- "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa",
- "02ed83704c95d829046f1ac27806211132102c34e9ac7ffa1b71110658e5b9d1bd",
- "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3",
- }
- //the expected addrss to generate
- var addr = []string{
- "1Q1pE5vPGEEMqRcVRMbtBK842Y6Pzo6nK9",
- "1NKRhS7iYUGTaAfaR5z8BueAJesqaTyc4a",
- "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV",
- }
-
- for i := range wip {
- seckey, err := SecKeyFromWalletImportFormat(wip[i])
- require.Equal(t, nil, err)
- _ = MustSecKeyFromWalletImportFormat(wip[i])
- pubkey := PubKeyFromSecKey(seckey)
- require.Equal(t, pub[i], pubkey.Hex())
- bitcoinAddr := BitcoinAddressFromPubkey(pubkey)
- require.Equal(t, addr[i], bitcoinAddr)
- }
-
- /*
- seckey := MustSecKeyFromHex("47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012")
- pubkey := PubKeyFromSecKey(seckey)
- pubkey_str := "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3"
- require.Equal(t, pubkey_str, pubkey.Hex())
- bitcoin_str := "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV"
- bitcoin_addr := BitcoinAddressFromPubkey(pubkey)
- require.Equal(t, bitcoin_str, bitcoin_addr)
- */
-}
-
func TestAddressBulk(t *testing.T) {
-
for i := 0; i < 1024; i++ {
- pub, _ := GenerateDeterministicKeyPair(RandByte(32))
+ pub, _, err := GenerateDeterministicKeyPair(RandByte(32))
+ require.NoError(t, err)
a := AddressFromPubKey(pub)
require.NoError(t, a.Verify(pub))
@@ -267,7 +197,6 @@ func TestAddressBulk(t *testing.T) {
a2, err := DecodeBase58Address(s)
require.NoError(t, err)
require.Equal(t, a2, a)
-
}
}
@@ -279,3 +208,25 @@ func TestAddressNull(t *testing.T) {
a = AddressFromPubKey(p)
require.False(t, a.Null())
}
+
+func TestAddressFromSecKey(t *testing.T) {
+ p, s := GenerateKeyPair()
+ a, err := AddressFromSecKey(s)
+ require.NoError(t, err)
+ // Valid pubkey+address
+ require.NoError(t, a.Verify(p))
+
+ _, err = AddressFromSecKey(SecKey{})
+ require.Equal(t, errors.New("Attempt to load null seckey, unsafe"), err)
+}
+
+func TestMustAddressFromSecKey(t *testing.T) {
+ p, s := GenerateKeyPair()
+ a := MustAddressFromSecKey(s)
+ // Valid pubkey+address
+ require.NoError(t, a.Verify(p))
+
+ require.Panics(t, func() {
+ MustAddressFromSecKey(SecKey{})
+ })
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_test.go
new file mode 100644
index 0000000..447fcab
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/base58/base58_test.go
@@ -0,0 +1,1199 @@
+/*
+Package base58 implements base58 encoding, used for Skycoin and Bitcoin addresses
+*/
+package base58
+
+// Useful materials:
+// https://en.bitcoin.it/wiki/Base_58_Encoding
+// http://www.strongasanox.co.uk/2011/03/11/base58-encoding-in-python/
+
+import (
+ "bytes"
+ "crypto/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+var benchmarkAddr = "BbZ79o3JNbvi4fifByyopgdS5q6uT9ytmj"
+
+func BenchmarkEncode(b *testing.B) {
+ b.ReportAllocs()
+ if _, err := Decode(benchmarkAddr); err != nil {
+ b.Fail()
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, _ = Decode(benchmarkAddr) //nolint:errcheck
+ }
+}
+
+func BenchmarkEncodeOld(b *testing.B) {
+ b.ReportAllocs()
+ if _, err := oldBase582Hex(benchmarkAddr); err != nil {
+ b.Fail()
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, _ = oldBase582Hex(benchmarkAddr) //nolint:errcheck
+ }
+}
+
+func BenchmarkDecode(b *testing.B) {
+ b.ReportAllocs()
+ d, err := Decode(benchmarkAddr)
+ if err != nil {
+ b.Fail()
+ }
+
+ e := Encode(d)
+ if e != benchmarkAddr {
+ b.Fail()
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _ = Encode(d)
+ }
+}
+
+func BenchmarkDecodeOld(b *testing.B) {
+ b.ReportAllocs()
+ d, err := oldBase582Hex(benchmarkAddr)
+ if err != nil {
+ b.Fail()
+ }
+
+ e := oldHex2Base58(d)
+ if string(e) != benchmarkAddr {
+ b.Fail()
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _ = oldHex2Base58(d)
+ }
+}
+
+func testEncodeDecode(t *testing.T, a string) {
+ bin, err := Decode(a)
+ require.NoError(t, err)
+ chk := Encode(bin)
+ require.Equal(t, a, chk)
+
+ // The old base58 package cannot decode strings of all 1s
+ allZeros := true
+ for _, c := range a {
+ if c != '1' {
+ allZeros = false
+ break
+ }
+ }
+ if allZeros {
+ return
+ }
+
+ bin2, err := oldBase582Hex(a)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(bin, bin2))
+
+ chk2 := oldHex2Base58(bin)
+ require.Equal(t, chk, string(chk2))
+}
+
+func randBase58String(t *testing.T, n int) string {
+ b := make([]byte, n)
+ _, err := rand.Read(b)
+ require.NoError(t, err)
+ for i, c := range b {
+ b[i] = btcAlphabet.encode[c%58]
+ }
+ return string(b)
+}
+
+func TestEncodeDecodeRandom(t *testing.T) {
+ strlen := 127
+ n := strlen * 1000
+
+ for i := 0; i < n; i++ {
+ x := randBase58String(t, (i%strlen)+1)
+ t.Run(x, func(t *testing.T) {
+ testEncodeDecode(t, x)
+ })
+ }
+
+ for i := 128; i < 1024; i++ {
+ x := randBase58String(t, i)
+ t.Run(x, func(t *testing.T) {
+ testEncodeDecode(t, x)
+ })
+ }
+}
+
+func TestEncodeDecodeEmpty(t *testing.T) {
+ b, err := Decode("")
+ require.Nil(t, b)
+ require.Equal(t, ErrInvalidString, err)
+
+ x := Encode(nil)
+ require.Equal(t, x, "")
+
+ x = Encode([]byte{})
+ require.Equal(t, x, "")
+}
+
+func TestDecodeBadChar(t *testing.T) {
+ x := []byte("1111")
+
+ for i := 0; i < 256; i++ {
+ if i < 128 && btcAlphabet.decode[i] != -1 {
+ continue
+ }
+ y := append(x, byte(i))
+ _, err := Decode(string(y))
+ require.Equal(t, ErrInvalidChar, err)
+ }
+
+ bad := []string{
+ " bxpUG8sCjeT6X1ES5SbD2LZrRudqiTY7wx", // preceding whitespace
+ " bxpUG8sCjeT6X1ES5SbD2LZrRudqiTY7wx", // preceding whitespaces
+ "0bxpUG8sCjeT6X1ES5SbD2LZrRudqiTY7wx", // preceding 0
+ "00000bxpUG8sCjeT6X1ES5SbD2LZrRudqiTY7wx", // preceding 0s
+ "bxpUG8sCjeT6X1ES5SbD2LZrRudqiTY7wx0", // trailing 0
+ "bxpUG8sCjeT6X1ES5SbD2LZrRudqiTY7wx0000", // trailing 0s
+ "-0xaA7F2DfD73e7035-93___x___6P3Or9N81_1____n__285_nJ25WTRQ_vexQWdN14S__9.9274920729751111",
+ "-05425.-2-___0xfF.037171326036317302-0xaBe05160542057653662WBKTdT58ZxXGpoYtbaJ5UPYnVLNmw2BHSH82885604906510132935198.-07771-0x2f6E663acEaDdaae7e",
+ }
+
+ for _, y := range bad {
+ _, err := Decode(y)
+ require.Equal(t, ErrInvalidChar, err)
+ }
+}
+
+func TestEncodeDecodeKnownAddrs(t *testing.T) {
+ testAddr := []string{
+ // Empty address
+ "1111111111111111111111111",
+
+ // Example addresses from mr-tron/base58
+ "1QCaxc8hutpdZ62iKZsn1TCG3nh7uPZojq",
+ "1DhRmSGnhPjUaVPAj48zgPV9e2oRhAQFUb",
+ "17LN2oPYRYsXS9TdYdXCCDvF2FegshLDU2",
+ "14h2bDLZSuvRFhUL45VjPHJcW667mmRAAn",
+
+ // Randomly generated Skycoin addresses
+ "968o2vpiSDAMBCNNAAMCdVHYVvZcxgH2eW",
+ "CBKuGtiSttzgy6JyYYUTx9JTesxjhXKSHo",
+ "UBWDAVvH6ZmKX8KH86EWLfA7Du2qKVgKNw",
+ "21G6AU7rpRzCMuFp2UCstD6PZY3Mcjage4W",
+ "rRfsgEUCTgk3AZcoYCRmS5Rinadrw8LAp6",
+ "28QAWt5iBxYnJVtpSayLHsqqWUSQp6XDNE8",
+ "DVjnRXWnxyr1ueeSYpVKWn61jxbYyYND4X",
+ "4gWTKmjDvpJe34i9i2i33riVZiew21V3P6",
+ "2gcpPYo9XLjSfAcFS1mV4NJR9jF7528L7TU",
+ "pxUF2qJ34aKgNLWSsoTHm8iuMH9bgyXyAB",
+ "2jsTjaKPWwomBGzHoa5qZm22wdt59LmkFwb",
+ "Nadb59i6pkMeTkwbXMfWbftoaTj8Qx1o7Z",
+ "2iEPcNEQg6YQYt2GLjSGJwJEDdLcLvgnatA",
+ "2WBKTdT58ZxXGpoYtbaJ5UPYnVLNmw2BHSH",
+ "oq4yRyppcigt4S25Sdjx1Ncpmzyxw67SLe",
+ "2NauYwSe9RPsHY7oZaLuakP44ofYziv5wte",
+ "Y9jPpWaLfoTQYkhrFzgnKbCpjMrrTDvWX2",
+ "TQfQQQ8ciaSz2DT8tBzqyoVzA5vg3qZm8n",
+ "yUn7TxndNzRoZ34hMijhMyh7CQnYA7dAn4",
+ "26rQC5KKEgYyiLhSWpef94gHXKjYWPdjQx2",
+ "8i4EU2R8aiS1QE1PMhE6qvgqencUowHZnp",
+ "TbFtcHeUqef3JeKMBrRxzRvBXK4nJtRoe7",
+ "w1tyShnSfWH3sUjz4HcK6rPVHvzgdhLwJz",
+ "m7QJ6LxNFnGs2vD3dXYrbisfT56QqkW15b",
+ "2VtEEEvHDSCMGq72KBjzyS27ZADwKG4kjgd",
+ "2LCYUfj3rwu4N5NY4wqNX7STAkyXu3LfdYC",
+ "WVmwB9KMYnd2c1xRfESa7wdgGuNLMU4cni",
+ "cj29Fobo9aRSSEJuWEtfBihHsgHExuYVBD",
+ "ZLyUpWtSMDqHHse2xA55zbefa84cx8ykUd",
+ "PBENV7bgFYMsnvUbDJJfUF46JukjAvf7Dm",
+ "UY7vLUfL8cRLCLuRdGTPRxye5sRxrxFTbV",
+ "27q7HQnk73eBwKSLd1yTocrMuwiTAy8Q7mp",
+ "Vb4Qa9uhz2erHQvFZfV9Ns449WDj3rfCwm",
+ "upWBoRfwhi4Fh6AytmypNgpXVVZTydhtzM",
+ "2GwBqNwooQcZWcToHZPJXu8oe6zM1GDSssz",
+ "SpV85zojCcHQAvarwo2i1EDVmTyh1FzffJ",
+ "eniYXrjCMu2R83kVzjmSq4peLwPWEyBHJj",
+ "2R5mhWq2ruoBAqpR4o1rrkz7UzmVdfs5eWw",
+ "24fhrKkXGSTjJHLdSiHNYQCWXHJAq6sevKq",
+ "hHHRHkDv3Qzmg3nTpdkao3Fy6FFQNmtEJS",
+ "PV1EuQjLv62DK6b2dgd8mgE3idK9mJZTmY",
+ "2hQH3Ku45HYYQR4pyjaNwxRY84GbKXVkypB",
+ "iAJ8FTFQRvb546QX2i5GP3u5s52GwBJme7",
+ "2ivAdyrhQmAHqMzxEXUvv6B6gdUSuoT77uF",
+ "v2KqYbcPvBRpAbC2mf5xnJ5WTUmGHdwc3U",
+ "2Xe6Y6LMghjUnYv27MXPUNukSgKpWUSeLPG",
+ "2mWaLTPRU2ZjdGZNrrCEvP9jFuNxSq54NUb",
+ "2eEYNokgJJheKu3ooFqCQTUFCx7H2oaPHs6",
+ "24PwdbES8vCVRnLhnazrD4Eh6NQ2oUQTaUy",
+ "TuHr8eDRWQL2T9ZkpjeFVFknEBdHpWG3Qa",
+ "29ebg5EJRsQDhx9NWMpvx8vsWaJmHEa3Jc3",
+ "bgVmUpTcA5DN4gT4vpprY1pTeWvUcoMpu9",
+ "eFP8myi627SNbPr31TcV9KixvcRGyvjAf7",
+ "2hasdKBRdiZ6D4h9qPWyrQJepvbFBxwrQQE",
+ "25UaV7LCx3Xbof9Qb2rwkhw41Diqb8zZWAL",
+ "2XKjCqmATQVNdg7QabYfDTr87ngym71Fk5q",
+ "S7NPCyF2M3HMGUtahnxBNf7hkjhZrrvTkp",
+ "e9Yp46wQtFcraRj7h7KnxuRZEEBt7wWKwv",
+ "2FPW4JERBWzSf9n6jruTYPGAAVuMP4QFtNB",
+ "2FgrEY92rrs4TYQxw7GSCeg6tR4CSg58CnU",
+ "5N3XbiQoLX4wSd9ZAi1KVT89j5pqkDAARY",
+ "224TYwgFgaxZ4Wa9NMzvvD9ejG6mWBboDCF",
+ "S996QG5U5Kbe5p3pDVLgss2GUDxQdG7PHE",
+ "aDBFEf9dXw4voDYNVY4fvwG2GkTGH6X5D5",
+ "MyvhxtWXo88FKFhUjw3AXbjK6YtMiXYjv8",
+ "2iVHso1BsyZJyaFurJjuhTBHSM6i7dU34Av",
+ "GqciZuy28KkK6AKb4U7rgb897xLtyqwe1Y",
+ "UvZSTRe6XfwtLSFN9tEkVQL3qoAT5hQk4s",
+ "2A5fRgBV4hf3vesoboUsJQDsGo4wFbBpHcg",
+ "DnmQebrTbSRwq6aqZNKnVpKPUkNhEdVJYh",
+ "o9BccckziLRNxM7paqdCmMDS36qV4uy3pC",
+ "jC48GN98UjrRkMgqAse4CcWrW4D1kjuE9p",
+ "2fsc1SRRoBt2f6RLEEWnTS85Vmj57oXyUHT",
+ "AKBqyAJd9L2eVY2EZfsjrzFgKYZufg7sHd",
+ "FbxYyQBVLFihSokT5bR66HgmZ2cnbqpmAL",
+ "ktHsYrPsjRBYfhaXfhQc4eoTAN11SELmvA",
+ "WyKgazrVaN4XteQqw4G6o34E1MtcKZAzun",
+ "WH8WiwzXH5vpnZe8xGwU55vzpHZoaWreZL",
+ "2aQC5HrMfXnjUyDfTpUv2nN7mpZcM6JZpUo",
+ "nT5FpoERESCs29c1DAsL9EFLyEVxkvSDjJ",
+ "UkR5f61ptH8sBC4WdRogrNKRpMyHtw4iB",
+ "D1F9egTEGr6dZ4mizzKtUEuZDFaqGwNf3t",
+ "Ahskhos7U4cBeV3JjLu1A7PDrqGmRw9Chf",
+ "26HRUBYwWnGUrg7MCozbcjgny3kraS4AG4T",
+ "2QhZvWvSx3kY2PqLf1CcQ6SxjrWTxN8SnLQ",
+ "2icM26PGcD53GhtWvnwnDRgtLHmV16qSucZ",
+ "quWXbQGiPbNmrK2bxv3wzem4vBgvF8uYyP",
+ "pX6AYyWV7dHxf8p7T2LSTxqrFWsmZPeGvK",
+ "E1wqrVyf1ZfbVGBoEZRDMUv3kVQu3Pkq8x",
+ "obzkDamYMoe4SwHj7wS5By8C7eKU8GBNic",
+ "pD5aLHXrL6eo1QBeNYoc2TKHN7cS7ewqgD",
+ "2eZxS4UnNNcStV6S17UpYauK2mS5bSxom4o",
+ "Lc7GM5ZEcWQpVExM8Xxw1YMxtth2dZpQDs",
+ "AjKGaucxKzt6s3T2RDBSkjz4Kmu1eDiNtX",
+ "PgYHzrzQw2MXK8YGmeRoiUysMhQgK5DmQq",
+ "2hmh5piFh7myRwfhbsCE7VRSSLnxFAsf1me",
+ "2CrYxhD4DZZEZMzmid8LHh4RPmuRzGVQe7Q",
+ "RupPXqWnE3qQpYeYPqvG9w72ywWyPu7znM",
+ "2G6WxnQxVqRXxYsnccnBD3eg2G4Ai6mTswW",
+ "BbZ79o3JNbvi4fifByyopgdS5q6uT9ytmj",
+ "2Y8Agusra9684LaXkmbcTYTmYLm4E6vV7M2",
+ "29zJ7qwq8pQpSEjR9qGMnpUwAHX5ufruaiK",
+ "AtcW5LJNTzm4inSbuMzxs5a2nFezVxzysC",
+ "zGPm8xVsbdYqLqsC7RjKNvHsQ9kWuMEE9r",
+ "od1i8rihfJE55yW63FxHL8o911QqVNqotW",
+ "2FUrR8Tietcz9hT2PKktgHU57xFs26ahtqV",
+ "2b2Cw64hQBCwrtDTuuaXyapLwq1BJgb3VUV",
+ "9RbKMTftN9NmZNhN5wE4efHrS2T7KFMrPn",
+ "Dp3ayfLQC5d9izUyttG9d4c4HVYmHeWWUW",
+ "ssYjtzYAiz1MPsbuYzXgx6v82mjgCGaYBM",
+ "9xpM5rijPu3hmsUkGcvqUwPSbW8EVyxL8W",
+ "25T4ptpFRKAKnPDbF6XdL2KLdC4yf3kvvQ8",
+ "xtCU2AkDdBAGRRt8CVe422ys718EJfevoi",
+ "TWwozvxWJUeH322fNmUc5DurgihJMMTt7v",
+ "2eoLN4nKupyfHvZpNCwJuh54d2ZXMBmMTe8",
+ "tQPc7ZDnmab7XCoX5zhnXBhUbDTDinhbpZ",
+ "7yPsXcyaBRMZAvc7kbFLgvAqyFgkKtF1zP",
+ "2AUkoPcTtV7n34r6GEjJ5NpB4Tvxe38J1zX",
+ "vKVLoMtZqY2Z6mGBjZqHtXKkMCQU4YNKHk",
+ "2d1hagw2Nc9GCcMgFbHJmdjJz4ZYHfXeNHy",
+ "2juX5e7E9Hd71Sgy2QouFp25oCo1JtnC185",
+ "2SZ7GsR2GiDRxSpc9dAFbrULfLjRaq79scQ",
+ "2L7LtusSY1Mbf3XXEeUvnhayVXvdNDJLpkN",
+ "dtWjBFwsm8bGWTX6hPHyrhD5PU6pmyTjKJ",
+ "crRaWgbh95SMSj9RFtqkT7Dv4f5AuaSiA2",
+ "MHNy4X42aTmpstxd75S8UMTZVLyVtsaTP7",
+ "29Z3CUJutadX9qwP86BaY3k5fkPKo4M44JL",
+ "JiQYF3Y4ZG2qbHeCfLsWu7KTeLoWYHRpp7",
+ "JwhyzTPruhtkuMsCF8RiBx3nX7ancW4rfg",
+ "2MGxsK46oqWiYBzTaMMABDtNDD58u1uKJ84",
+ "2CP7cXex8MnNWrRbpbGQYmNCSwuuztPV6dm",
+ "X3jWBpMKqHVUGBvWFL1VXj7JAUQx1Hd6NL",
+ "2QbwgXpfxRapxL3NfQfxBp8xNsH6yieC1b",
+ "6yMo7f4eQQGRn5C4pr9T9GvCCkNcNEHFqc",
+ "dwSMDngNY4TCv3KzjxtWqLPHy1M4jDtbGU",
+ "2S9PBRNJsYXa1ZoKahHttQwp9bfF89SHURj",
+ "2eFu9YJokP8A4FPnqN9PecZSwBKwXjAZWGS",
+ "uqjUcBkrMoieS5dF7H7v2mpA7o5MA2gU6",
+ "2hUFEB1bBdRWJP4CM756ZH6z54Vzx3VhXp3",
+ "CW42KnE5fu1TEzFevYM4HWge3oYS4KjoCz",
+ "2EdGc2LXy5Jha1uJoACWiBaWL8KuuRExXjP",
+ "S4RT6pKPxzH8zZb3ZX5L22F1fKsugfe9QM",
+ "j1AvZ3FYGEjeK5zMA6JqYy26mKkSgHCik7",
+ "2BriifqsgCbshuaPDvXKF6qtAauc6Y28M9f",
+ "xTwV8bL6cqG6XzfhjZq2RhAwAq1sCpEd1u",
+ "KxuGSVGkW2otJjkZqtT2SBciaxq8Y8rgXt",
+ "tsrbLTBEs9z62mGRQSe4koEd1cBiMBXhSo",
+ "rXjQwusifw9KGYe5yefVwaJ81z8S7ufnPY",
+ "zk9efvbDD8SvBnTCXneWPN9tQNLUb5FwGr",
+ "2WxkLNq5dhTSdUL1TF6ewjuYomPdpwV7Ebz",
+ "x49WSdipZ8d8aibBSUrxxTYeqaTiDE6quk",
+ "zVyoz9W2zSCfYhWwSWjsQ1xrHSVkuEYJ43",
+ "24zBSMs4abJXH4rmHfCMYGUZkaQf5NFSutL",
+ "2mZs6wDTJrEoRSWkYaub4VpgiGg2CZE4jVZ",
+ "2DFRuFFHJBbfePD1ZvrbforZo3ntPg32eHM",
+ "Ak54Z28R8ov6zwWXz4fZ2m9jYC66rKxZvh",
+ "2Wkn3PfetHFQzkm5xkJVK4NGcETokvDj6MH",
+ "2M1XpNSr3gRide7aRyufQeky9mHhVcPhtgA",
+ "2U9rNTcUvTT7F9pdMf9g8QPdjETmU2ux1Vi",
+ "2NTowTEg7K1876jhggYLAbr2AyVE8j3vPRN",
+ "jZDVtUiSXEvxwgnU1uTapFzPT6pfyHWNgc",
+ "aH3qZEo2cytHemDurzdo9hHveDmkyi6jcR",
+ "2XDHzAcfZu3H27kmCzT7DmEH1N8PHg5sMbc",
+ "Cd3Z7Lxrvdgb3EuuuyqNmqCJu2VAiionvU",
+ "BdU8LDbRz817PDiyJRbUzX9kvwgJuRJL4v",
+ "2fD34aN1kxUkKSHAVUkcTSZmVpV1ZPMK4jY",
+ "2SGdzLKVwRY7dA6vrfZTZqioUuR7yEWHb8",
+ "t4aPvPTwy4NQr4sHUbXFPCbEpPHMnHn9fw",
+ "2fywmk1JFBWfrbYKpyivF2qt1XjV7rTuCqD",
+ "zSP3W2ZbzCWwvk69JqvREPxHEvb1U9Q68G",
+ "2QHvvvKBKNraiCvoGx4fU3Xwq9tgrrc5arR",
+ "2kVXSkF2EXE7LxbcSwLs6Gyh9CgUXf2gYgE",
+ "PmSgmhtqHJq5Rs7LxvMTFuZH5TQzZgHamz",
+ "28CeHJSraNtzbtFRCVCf35ju3sMrmKGJwwX",
+ "22KKg3ZgPeay3rQirrQmaARYwqEqinHyppQ",
+ "2k8z7Me72JErTCZ1LsZby2fp8NewHKST1iV",
+ "VMvAaPF5xer1ajGXZRdjGLVpCMLpTc9MeZ",
+ "2Qj61y52vYpAJvSLwgBABADvLZ5F4BErJHE",
+ "2grhyrLNiRNyqTWJcGsvAbR8y14M4zuHCDY",
+ "y6xApWEtFdG52uzi2ZJudJUiNMsmCBFxgr",
+ "2dvXKoFtbt8Gwqvb2HsxWvsxf8wFfArnGg4",
+ "2PZCcqs48BZRm6mBQEbTC4gG7t64nNgqEi3",
+ "sq7PPbj2JyWywT46BNsEnnwmH9tPVnGKSp",
+ "PCWi3JbuFvkbpE9swb7F1SYW6aFk2tDYbo",
+ "2GfvH3qbf1SFs7cPJCHEVzYqjdKiXuqJ2gt",
+ "vgXornG4bZRsg4NUWBqrhT2Aooh8ejxY6t",
+ "RBV2tdZuUnj2UWZSocwTEmTkpw6y4YbLr7",
+ "2L3oRRE9GkyLLarwnCw4anARV5UVmNW6eD2",
+ "2bJgFYgXe7tGddAHSaRY9h36s6AGEg3S2Hc",
+ "2hjQgYU2p4mdDqHe5y4bt79vvzTeKJReqMv",
+ "v5e2svHhrgZSrNBkiHwjFf7r5sS5jF6vkc",
+ "2VphWiCSLxM4gPdNpoSJBTHQeBTAMTb4H3R",
+ "2DYvZpmSiM8tsCRmy8syW72jE4ZHfctorUd",
+ "r9iTn1j4FHt71asV4sLkF22AcwtRNvmBmL",
+ "cseFYUEh1qhDgfaLkSwZCZBdNWKam4gJPf",
+ "27jdv47Ja6E361DSKqRcnsiDM7zDongfwvp",
+ "2GQsqSbrkY9gfWpFrBUMJb4W4rq2tAPeWnp",
+ "2SkNmxHNGXFjCgFRd1xrt6Fr59LDyXwhDzT",
+ "2f4WVaas2bLAG6x275qgvgpaWnbMejYA7uh",
+ "kNzATym7CgPeLLZ7H2nTBgfBeoYR8QjHrz",
+ "S6zBom4WwXEQzE5Spa8DAJm3qemBnv6yxM",
+ "wv53C1kFaEtEdw9fRWvcMurpKfCRLj9jRS",
+ "fcRHD2WWVu6RqMwvaWk5KpQcDWnpPbZr7e",
+ "26rugbJW2TUpQW9q9ypLX17pKVjMMC1CuWu",
+ "scXdT4VzVWBAzZ1br9xSjpC21GvdMk48rj",
+ "X1GkgcZNSBJkuQ9BSQUAtmds23z34amY3d",
+ "24R69LPg37jUVAMmyHeS3dqM7j2mdX5uiZ",
+ "2PVAfoEp8aPL2Jjb2zGNGLNjTqx6A6ZsNRD",
+ "2AS42RwRamadzj6b4txfrurW9vcXmfBDih3",
+ "2kwtSmvkSDB9uWT6euiYrP96Lc1NUH5SQx1",
+ "qqM6vDSRNRxahM67yPuBvrJNt5EVuykEss",
+ "2j4FF9MWZwJPt8N39LvP7eGGUTbMuvqiwd2",
+ "FaQUteg23WSWdxqvZ39G6cxJgCJcAyuFzd",
+ "bpDBL8w4nkiViH6xz3wk5qYAfT1eNra3Ss",
+ "oHBvU5gzsMhigpN145k3uHL1xJFEinK9BJ",
+ "FzWD5iUccjFDEp3rFKeyzFnd4Y8YSpCeov",
+ "ys2svmjLCs77rjyDXBeKzusSxYx2dQBpgM",
+ "2DcbwH1WyBtvkkw3zbFHx6hRno9tE2CTqwB",
+ "23SpMyB611QbNvHuoYUpm6Bg7Z7KscfkafE",
+ "21YPWiGiaRGLcKKfqnt1GjNUAxSYkgbGRp5",
+ "2icA39FHL4o8VnSjT2EK9RkJ9qKPo1ajZFf",
+ "2FZBJr7yzpd9VkVToCNwCyyvHuLw7F7oBfT",
+ "2MCtyY5LMVWjZufukWRQPiN1ekwX9xdZ8SK",
+ "2faXP6FNQe5UU6iuJsG1rcsoQ2Y4NAFXGtj",
+ "r5Z3kHMN26WgHUtmgoQZ5qYRaKoDzmYdyM",
+ "2Lc8KDF5F6PtJtZCK8D37ogxdLuyAx6cPKr",
+ "pPbh3MUE1jGizzjTN7cDg7Kz8wHSFUU3yC",
+ "B58XCUERBveC3qwq7nW2HCrSjQYTCFBe3m",
+ "2ACaa3SBQtnyKifQuCcP9ScGzzcywJh8g2Z",
+ "VbQuu42eVtCdrYHyqZnPMrSaWWTiL6jRbP",
+ "cHxLEALyiKNV9aRuLD6AJBMdJb3mGpVqJK",
+ "7ygN2yK3oQ7iMndkSwfqwsX5sRUGMy4EJr",
+ "d3JNUvvLrgQwiBYqsZqf4swF7eRFZ6LzSZ",
+ "JaFL1fTrFqGEi6TRWfsEb6XfP9r3FUEiby",
+ "2SxTvW6rroUZuCtCo5knrnQXAXBGnyzHuB6",
+ "qttTHjfMpRgPzcjcWCEYaNB4nNZVVU51EB",
+ "AtfRrwvMjcvBTrBbsxoBCpw5Lte73hy8ei",
+ "r8ZLUe5q8z2FpEZmQDiQ9VAcp3jzmPsgPZ",
+ "2ZWmujCztGX39TYED9imhn2v2eQUP25pYyj",
+ "2dATBrygCj6R4F589c8XPweZ4FHnyJ4hnd1",
+ "2DY9iaqgXwhAeEVD3xyEeNwcR3YQkMEQbi5",
+ "HsN7mEWu5h2puz2od3xhLWUKd68a9bsFE5",
+ "V2r1gZtgXE5yseBigyQAScnT6YScRSrmMS",
+ "gp2LPg1eDMH3DE4SW41uTXz521Kh1TnHuh",
+ "j5218YBFmnyMtvL6R9H4upiRsCQiwxthX",
+ "2LvEjhkp2mzYUMQwZDE2oQzdVQuFEjaJ1qd",
+ "3BCoLWCcwhd4MzTvNY4zK8DkKN6PMfD5i7",
+ "zePVaeyXvbGdcdEBgMmyTuWny3msTvmhbh",
+ "2iS7zccqPhzCLWwpJnrCYrigDFVQypby4HJ",
+ "Ac9nPU5cdbc3xyK8gq8ibucqYcvzbK8XX3",
+ "2LdJtjTs2DqLVukKwK4uuURrXvsr3pfdjVs",
+ "28WU8Q2XPD75abALS3QQfURNAWU42esV3Ef",
+ "2HmjBLrJpBVHLWGZ4nMcA34Atkt4UFpmqRV",
+ "odB8hai7RDuaas42sC6UC4JDXbuXosDyoF",
+ "QeSQftaAK7auZYjieieA5x51R2QrkPNx4i",
+ "T7xJKGwc6bP4HCCfzCeZujkbDhEobis441",
+ "2Wyqg6Ub6QHv9zba1vCAvFh2aZNzxXZD1y",
+ "9JXQZZJRBXq89jNasT5A397KTcy9nhMLi1",
+ "2JWQE7JJ8fKP3sgNvTrQU1iWV6LCCsgTCLJ",
+ "i8fhbbMLHJry3JJzck6yRxGn5WQe749raj",
+ "WfVL4C1LwQBWAAn9acvryuSJkWmzFWQXbr",
+ "2WGbaZtSxj15MeYnPWV7UFGUQzcSUYmdEb1",
+ "2MNLJARNutG75d1Sonm69hbiwwWnEZJToMm",
+ "QWESFNiqAuw2xDq53XLsYBtmitRx67Mduu",
+ "pv4bUUkzG1J1V98c44E2xEpPu5xZMnzG9y",
+ "26iMZ8fVzHPkf2cnyRLiqLW9FG68eDhwZhT",
+ "Za3Lz5KBpVQxLJRwiwt21xV9g3skHhnJtY",
+ "2D1NrC32foTBDcowbx3DGsYQnHhwKpEFrRw",
+ "WHUQ1LScVYtScdj4zbsVSiKrkokVMYkT29",
+ "Q8ct6Uo3rZvbu8wM9QvtK79UgYW3u7US4o",
+ "sD2etFo4bsxVzQzqXNRvppG5mxjs5593Kb",
+ "LaB7KFHWyYp42bXnfMxrpDJz5fPr1Fo7Mg",
+ "x1tnyYT2GvcGG8HxV5oVfYfaZQYxTtVF1C",
+ "2iXFMtz3fBjyWUEBRvus4M9UZxNMjGg54CP",
+ "UhjjML6iwVhjF2cyQVow87c5tsMzFEEL4J",
+ "m6XgSPMLn25pQKErbjcN6ySNdGzJK6Czi7",
+ "RiihQXtVyb3gUqgVYz9J4oXoZJiWZcrNGA",
+ "2UxCaHF3JmC8u2z9wJYncxWvximAnAQVQPf",
+ "tS7pMxfb4YMaJrjQtA8Pj654raEdQWw4YJ",
+ "LogPXpHU1KSgNtunRisL3oCsPh56ov1mh7",
+ "VpHJKb1fF5EEJKAG5pzSxfwPVT84wvST3K",
+ "2ZDbmk9wq6GvDPMTx1V6fMSwXs3nta2BmEw",
+ "2DmmMLt8xSoDMgmWG2kzEgyyN8P9EywTGNA",
+ "2RHNDJ4c1CTA8gjgJLyfxKFZBfCMjVZjJtW",
+ "2fPR3QNRuK5BksW5EKoETC92UhQmwpQ1P47",
+ "2FKoApfy88pA8v5jMhRbo6Zqfa1t8KZ25fz",
+ "2Dv6XTJWUkDscKJYAkXakKQ8o695s4BBjdE",
+ "EVRDLK9n8RduQcXZDAR2jciwnmNjYfULBX",
+ "SANvVBkc79rUbdcYyYhAgXFaqNTEYRm5T9",
+ "2zNM1cfutEVYSkeDKsHy7pLEMu7voXaFJU",
+ "2QFCHCM7kApUjq8F8tgULK7Yyk8ryNucnCe",
+ "2Uj2ZerjUsjiwf6R21eccURZZa31anBdACo",
+ "TYLS8yFs36C99U7rYK3N1CUTj5JQ3Y64bh",
+ "H6qZgezxVK36ctSChodvGtKpRUoKzhgoYC",
+ "t6tp91kpqsYCLFEenK11h5X5SvpCZJwyuY",
+ "2BETM51cYTqnzC2vyrjTBKi7HwPfiGrvm5G",
+ "2DRyRcJxfSJB4UBv9arEcKkQQsdxmbk7L9c",
+ "2JfYhANaA9cV6wPr1eUgcXznUbEhdekZQaN",
+ "J2T8duWvN1vdrekZis3bE9B2GJzowEnaAP",
+ "Abcsp4svT7L1L5YG7WCr56UAkMZNcnexCj",
+ "ZTwiXNTh57ox33BJKBLrTvgAd44nM5ocTc",
+ "iqvHjmBh6om9yeoWttwvdEcS8c7ikQVFhN",
+ "x5xx8onr5N8ZdD19uoWMAdr2KN3pATid6h",
+ "2LKxjyNUa3wJWifsEhsemD3D5VYEBWuR6hB",
+ "WWyYjDKsdH9c6k42vYV7xUVeE3W5QMoDRP",
+ "2FvRFAkzyWVnEsobsLdUXaKWHziZXgPYY4B",
+ "pgfDSRXW8YoGJMbTygVcmGAocnacifgWeN",
+ "9hPAFWvNbuncxpnZSJ8VTBmvbtsNyrAAjU",
+ "2XSct4fJN4PbfYyUf1ptTrQGJYEZjN61QMf",
+ "GUuQkaZHEJi9CSKrkDvgMPRtUugJR1e1SG",
+ "2UDLvfTcYPXHQigHtntKEHm8omNUhxHjJ3y",
+ "YbB29S65seuZj11QQ2HwLPirBC21rCzGWu",
+ "NQozKm4H3JucTXApyYrb1xSiAVCRm92Egh",
+ "rnqkTxcpoJpvJ3YWBr6FBKLf2bHmeagWEj",
+ "7Sy46MPugGv8LfXd4HYqVKKDJUeR13Mbcj",
+ "ZmYsjwMDxaxS8dDHNctyqii8exgzGZGHxB",
+ "2PK83Ah6KW5zCAXTew6YbJTKFfUZ2Be1uqG",
+ "2eBv3rnJWUTY6J1uGHZwrFfs9hdEG8Qp7vy",
+ "2cpdD3eBPqEGC9KBnrwVjQu2MWtMSsweLXu",
+ "EbusqpeXdHSGWveCe37xB1LuBkRHa7HHiA",
+ "23dWjaxitheagVKRrv6wyp5PpeTcEWL1aKi",
+ "D8Cmz8fjmxrPgeuU1P2ade72x6G4srKt22",
+ "uGvLVnDZmaWxR9XVTr33uhJ6zgbabnjd7B",
+ "bhZoVKietn2YYQqN6FuoZVdaJD7BQDTp53",
+ "22Pr5b6ThHzveHyBHYhTV2PxcH1uCKUhgj2",
+ "USHbaDyDJvbut4JYjDq7Pk82VA6fp5eWXB",
+ "EFc1NuPKFLHpJiqS4Jhtjrqxw18ECVL5dh",
+ "2Wiu3sYsbbB6JXPdJkg1JQc5CWSUKoEWmn3",
+ "2GZAg7zBNucXt8RvYqtnUYhELVgNBvn7d1T",
+ "AsXziWrAtba2TeG1meq1UiFBSw99skaPZk",
+ "kSFHuhrtHBWrsaN81A3RnBCd86V4RFxETW",
+ "FhbRyXNp9PhSJZwPiXSr99p5yHxvYFwcs9",
+ "kWYWfkhuW63JeY9bQk5wP8QevS2pXEwQNE",
+ "2KjSeTh1LJezE5nfSBv31z5frLdbJDtoVDq",
+ "WGo3AmKPy2S7sfosmFSUeUWnrKo7htgePz",
+ "o7Qs8DZF2z9jYTqs5iwn7SdurQ5VkuNqiE",
+ "UJw1vB7bkB2vVcrvcXK1dWvJE7GrXxcc9m",
+ "6DXNuZqS4Qz2Duip7H8jLy1warLCLfwriD",
+ "5fGVmbKSah57EKmqXdfdeuULqWFGhWwKwX",
+ "2VFbTvHqRWe6xFazo5YfTAauMupcEdAc5iL",
+ "vhByUiBJEQMLFhJMndvfeN2fZ3H21rGHK8",
+ "n1Bq6U2q51rZkxvjfkYyfJYdkzU36S7ERu",
+ "2CsTf7QcA6x2yiqJSg4LPs3ekwx2B7ca6ey",
+ "ZdBvpEYeZ1FBU6yhWwnm7hQseMT6d2GS5Z",
+ "2M2CLXodzgmFUaNoJHzRn8ybK3Q5YNxwUSj",
+ "8o8EDreff7XVuuW6CMHxL7dCZXRfwqio5Q",
+ "zcfm9q1sJYmwFRcmorfUcY9pfyXDt5V5M1",
+ "24z7yH2uGTLGwQXxeB1eYxSp5n6U4iUcRV8",
+ "pNz3yFCLwHkWoEucDfmLx7de6rDVtAP6ye",
+ "2Gji3WZif3m2bwsXHMXC3kk5KjxQJxbAbDX",
+ "279rNP8buEFnxgH91TChnd7BjEdUFjhCKbM",
+ "Y9AeMqziJEKRjFeFS2ndQP74RAfxkPCiPd",
+ "9DSHXhSnRjUvU7wg7wK9wrf3Cws1rWqxdy",
+ "2jSc6aubgKxfGi5LEGLfqsKn6ErbnDWxz7f",
+ "fAwgBYeqkNsVEJuCYwtrwi9EAbjzFMagPy",
+ "BTTjycRbzKiq7ANJunQYeDskRD1DkWzuui",
+ "2B7Gfyp3vCu6ZTnmvn3yXGWdogJJUzZ6Y3W",
+ "u9ytRd75EitwfcdyF4mKtFMKoT7vRbuVQ4",
+ "sVTmfs1U49RA2tDv49mpFPtxe572ghySrh",
+ "dzrZPdWmYg96EbrimjCQMTQ4jXN1CfCNVB",
+ "9ZTPux2h63jCiwcWG56pjeYcc4VqVRaMRw",
+ "73wFGG5hQvP2fRfLchVSuoLLf6fD65mPqK",
+ "2EMjHmxZffXofkgV5mxdhvgKaKFGHJPPvhz",
+ "2JVq3V3XZkkmkauMrXrxhiFjvhSyVhjkHvV",
+ "28PpwQyRdScWBaPtbZ8Xs9QxaZ5LhsELuAe",
+ "jLMUJxfiBSBNF863czaNmGBUzX9USvGqW8",
+ "2A3ytdxzegtxC4UPc7dzJjLLsDkwRL5fUDu",
+ "6FaqwhbVTBKExZCo52Xrw3NVrLsa4U8RJx",
+ "KGAYZE98subRmLpUneDqJfmwZTfs5GiJvo",
+ "KHyAUg5Rm9udYgkQbvDn8HGa1i8qCHiaqT",
+ "qEbtRTcnF9382iD61xd384JEmuGnamTMNU",
+ "2TUA6iWeuMXSLPkg7mQ7kAcPTjJinF9fcoV",
+ "d8q9PuocjHRKkpViEgcMruihTZ7LBxPQtt",
+ "VTe4PKZ4m7PdxmzQMK6aEgBTPd3rM9FxpL",
+ "D35H9k6v7MQHv1S3JX6auDyxRUizNRthUa",
+ "aKeJsYKw8H7DHSgMRQ4TNegy6uHukr8zsZ",
+ "2WBvZtBY8NJoNk1rXvRoD6t2Tsa5dirouah",
+ "Wxp3BHydpoHvh8amF6CfPnpWdnZfYykFoi",
+ "QZ6qoEFWKqnat2gEJqpvdjYapwH1aURKYv",
+ "q3TfzjM2jXATnLyYswDCenn4w2nRbFpate",
+ "G51YQhi4WdekifpV9UF78AECxN4Ew4yo29",
+ "Mo4XtmT9SZFhN78w7ByqBFhAr8H5k2Nno7",
+ "2ATrnekWcP8mZuPJ6FtjHBRFyMsNpbuxJZ4",
+ "rKQyC5gwQdtXLruXRzK4KWoFE3YCN51J3s",
+ "2EbyKP1GoGoQGEc1nqP3jsU8jH95jdQaZ6K",
+ "fdFjQhPpUybaEoxSSUWcVBwpQNM57Q2dEe",
+ "ipToi36PJLhumbp1HtzZ7YEerqS58ypJgm",
+ "2EAUuc4EyQk5aCQYXpAdmNsnp3BHawKV5vX",
+ "2QsXdXiNettFYYEbzXMfbRQUsQjZsxFBcT6",
+ "2fWKm871YjbvcTBcyVxdoZZBdo1VP9GdDH6",
+ "22jGbfa5XJZ4JUUYNK9diNBvCy575nFsUKW",
+ "zYF1pA9UkjKeRLEBqQig1iz5jtH9YFBiEj",
+ "2FB7346MUZY3XimDZ9RTM9WhGnuRzdJXd2x",
+ "kSVPyoAWXSszkzUQjVxM1zo1xLKYwbQLz2",
+ "2ZD4TvbXQwpvQ2pGJL1oLUqVbtdQ5H3C7mf",
+ "2HD8hwurNHcKxmW68zjvvqa2HGTVKc1uve9",
+ "mCr2XgYuPL1fRULFU8vM1C9bouJKrFD2tJ",
+ "21mJNT98GNcQvY6bCVC1k5xqJbqhhSm7sk",
+ "qX3c2ZUXUpWEVaf97Z5uJpUUpDszfmFHpm",
+ "2my7qAW3rgLqCZ8bvfnkqvn332fJDLf9Q7v",
+ "2PFXBcUikRwEDiuE5PRxeisJ3VnvR7sCgT4",
+ "iTHmpJqSaKR3gzgZv24x711SLPjmXF5vfz",
+ "2dFeubDMugcGSHN7yvpu54Z3NiKdfoBZQGV",
+ "qAw5Dg1vH9VW5zgthhjpApUob5RcKCphxR",
+ "2X7yaNvNeSyUZ9odXPKVBZNC2sK6mxd59dm",
+ "275S5zxqgdb3QkQFvEZvmnpN8HjQdM9gcg9",
+ "264RKB64NHgRUsm8N27NRQgBWhUQCNZQ8Px",
+ "2G67wabyeqfAFn8f45bv75D3FiP5yc3fgT5",
+ "2Pmw5G7bm6PUZ4tuh98bWMLK5JsEnouHoxY",
+ "2gVSrUYSRcXFfAQ84N8euMqxiAY4C1Ma9Mw",
+ "pTW6o9Uu1y6TX3uPMpthkP4hFzGn2rhbRn",
+ "28kJVjf7DrD6dQwdS6eNccjDFeLZyqHgit",
+ "2B2vmEJnUPALMtBWk4SVwjF1ieipCti1Whp",
+ "VUzo7YVBJW2RGYmCzt58mqXuxnHCuGgNRT",
+ "2KhY53kJWQXS1bAd1rAsuxQTfwihUTZJbm7",
+ "23Bm4w2N6yvjztzBFh9m3cwhWaVaDCACKXD",
+ "dC1CbqNLVmGjidDpCLnWGvCVErX2cStu7k",
+ "2N9Kpdkc7RMFc7YoSMwLnCBrJwuHrq1GfTC",
+ "2Gp7BCQ1Mcop8mDxfHqw919LsHi4uYvPqB7",
+ "bhgzNjYzMNBdCKPobBQu8Nr3ymxApUUdbG",
+ "wjPB8B6AyWFiXdncfYGyaVzn9UaHL8SNLt",
+ "2965DLJePnk3P7gmJr8iBxBmbEvMreVH9pg",
+ "U93sPPLeCUm2zYq2Xr1m8bEDnMC2qkSccY",
+ "ZScxXN8aMTRUKQY83qa4YMP1t9wVBbcbAb",
+ "2K3UXfrcdPhvfibbJabjVzoUmgCVahk89my",
+ "2P7chzgs6ySFe2BMNVHpv3ALgaCZXvKDvW5",
+ "2UiS8X5DzCjkXab86BPpsLEZWTZP38kWdTG",
+ "2SYwoSzatN2srLEXNRFi6g9xTzgmfkx6GDL",
+ "2H2eDvPEMgzVsQVjPCPKKgwBigusgT74Ad1",
+ "wKwnsaDu6W19FJ2LgAH49Hgr93wwzceR8Q",
+ "2ZC5pnUGAzuZz6YVqJ1KB8X5EVAsNw9gVer",
+ "2RmXy98mxKRCLcWgGbaW72zNwduM9ESc6oy",
+ "22temNXH8CaoAjcgp4ckMmpPX51TcjUCrtV",
+ "2bxk56j8SzAorKkPLFvuK7rARaLrW5nxDKe",
+ "2citeCcLqiga6gvXT84gNgTuFQV7eZZQUbz",
+ "PWnDMFTpQHypn2NCgpGxUbbpFznjnsYKCT",
+ "qKi7sYfkX3Ho97v5jh6bnenLREUmDaVmVZ",
+ "2YyFV36bxB5YgnbyjKvf7DG8hg1RRRRW6QM",
+ "2mKeR4Zon97znL2YN6wWYG1Z2Vb9n8c11c4",
+ "2YQgNreV5K1Ddi1Tg4vDrgs2g4Rr4Ve68ET",
+ "71hXWX4ZWZjjVCBypYSoKqrUJYShxCgB1o",
+ "2Zh9AooP5QqPDxwD2CyJ1jpZz9Eh8uJQmFK",
+ "mvt7fKqNxaRmu67wRbGKa6esaTkmKrVPbW",
+ "4hd48TitidFfVJA7PgeEjTo5uCLYqdVx6n",
+ "2kVrcp5e5fMQ5nobLGTj1as4PubjUvVdzEh",
+ "2KXtdeAaG7Sit3Dn2e8J7qUkT5UnF1e8GSD",
+ "Qk6ppegaZsKzBafYnGWmvvA4A43pCEyV4F",
+ "2gGTSiyYR1FHnJWsM2ZwAVP51SLTS9Ytong",
+ "32xUWZdeEQqrA258ofviDMJKydvJB5nBBp",
+ "n3SNQqZseF67wssSTR2Uey32oDjgkEcrgG",
+ "a55rAF1QrVk2AvncsjBh2cWrz3qe7BNPSQ",
+ "qEWewbfwoF933ZHq2MY5cQK6ERX7jMbrcz",
+ "22x77efFKUiKkMAMsLK5rHZz9AnwfRZEoq6",
+ "DjRAQSCHqrcLpta3BN4QMYWkn32pxXSieB",
+ "svgWUw8e7UJ6FfYhke12NH1RHkVp1QnrKn",
+ "2PtLXN2AdGA6EkjyKGTUPnwwiv469sVsLcR",
+ "2krE2Y4SM3adGuvLrbcsWVjDjmarK6fNLC9",
+ "hXj3uCN4qAAjdHutkFZmgQV2frZPsroccv",
+ "xiNdrpYhC49FuxCAS32AJTA5W7234YLXdw",
+ "2ZBzmkycL5aM54hQqYwQF7FRcC2K1FhbMY6",
+ "2ev996qFXc14ipF2onNre4doPzJwdGdz5Lt",
+ "2TY4q3ZX8XNLDsLbj2BpcZMT7BTfENVq1FC",
+ "29mUGDmVzaxB8eSxnhHeKUM8Loq2WVV8bgJ",
+ "GdybrYrqnZhCMZAFQbjfYsWvipzAXKzXaJ",
+ "2HXhnSVQziUTxCJ7mSUuU5sX6Kd1Go9nV6t",
+ "272aiBoYgCHu15C8XTZYghoMJhbPSvLggJk",
+ "5pqf7jE2YmvrzpHMsdxPAeT7yDgkoDNQwa",
+ "2EdhWc1AJqDHAfxt2EWSNGy92KeyD5VrrmX",
+ "pChD8xx1qaCKUz4AWDKmqadDPbKsKPk8z",
+ "GRSTtaK2iUi62WARETPSrqbPJaAmWVJ7TR",
+ "2iyXPWZQfyED5keLrCvFJ9dSnDEQ4KC5jtq",
+ "kW9FaFMHciY54CQbjwtUVxD7aNpEbPYs98",
+ "VY3CwaiMFF3A26CWA3oVR1bVvGMTGq1HTM",
+ "g9TXfNhdHLs7LPFrff1SRVKtv8r8Eh4we9",
+ "w8n9mL1mvg5AuRmcXzfKAfz3KTUqireetq",
+ "3jXUJUNypj2PWPAPGU6RvDR6kLPQSo99rf",
+ "2EhPvEwE9v1uR9JWPqyVhs3C4EDgh6A83Tm",
+ "FGd8PaHwVmLKe7Bb2QtaWygh1xsU5jXpdy",
+ "zBgRHaKFhCTUrjoFmaFv45KsthnvWd8zgM",
+ "2kERzcke41xND8amAghL1ZGqqENALwmSqEQ",
+ "2g4vmC8eYjxsEjTqicVYfZ4GL4G4cWLQ9bt",
+ "9xa2eXd5Lcj8SeHz9Zf5r4dTdKx5f4mJtu",
+ "tQP5PShYf8Myw2b2GjNtCnJSBkc6Hmm8zq",
+ "24zys4ATNj1CQBA3jPkSb77kPkf4XUUA2Aa",
+ "2MEv6A9AJ46VDRTtHVCZgZBmc3gQCtVdWhA",
+ "Lwxvt1Bp6rkMDnD3BHkjoyjk9SWguswnyq",
+ "gqW3mrYhxh2k9gRzzNBuMyELVU3RwvFLPg",
+ "2iuD9JdwqmHX32uHBT279sm9uEMPahLFsNa",
+ "wkEnW3fP1bcsaPfSagbjVAkFczF2Bf6e9h",
+ "HJg4eisTopK9xwQojdiZk7WPEuDeexQQxe",
+ "2cfqJC7Yy8qksPBV1hHkjrcu2XwtcC5MLug",
+ "j65P9RrMBqJxHHttYFJkotAbLMtkaskFKD",
+ "2G9jtJG5f3iJ5Xg1mipUfo3Cqg1ob4heY7q",
+ "2RY2PtFPfLWuMaE2CRcfA9mpvpE442ZnUJ4",
+ "2JQDSNYehQMrDhYnBDouBiB27gjpzGFXUTJ",
+ "SecSSp2bWg12g9wvL3w9cM9a3YeNwj2Z7h",
+ "yZeGyGdR96oVj6tDdCu8hu9uUit2TNU9iB",
+ "2WoB1ZrT2pSfUC9DABmouhsA2ymwx5D2Y7u",
+ "2a1Y54DLBV1tvjCXhoEHnRHRi1SEpJr6zwX",
+ "m6A48nPJu8x7zCFKXjdLhR6tP7GMPiqfCF",
+ "2D5NbU6awwaBu6DVvN5aWn8wjVfF8Yrgvyx",
+ "27WnC8X8S4vmzTkG3DivVDNAEUh73wMvkT1",
+ "2eioRV2JveEzseGV8tZ5EwV8pgqzfRzkcKa",
+ "2EWdcFgAcJXEeQvsqsT5TkSizPCAq4Q4gw1",
+ "NXjbWdD4E4nVUVJr9hFCgUMvAk8vKVv4no",
+ "2kBbcZPh9ESgDiGQD9Chgnuqp15hp4M4d49",
+ "2kMgnRmiJusFHFzRhcx9NhN8iyhgYUNyZ4G",
+ "2XeiysuxPjCr6LQDCViz2baEZqLe4dRopvD",
+ "gheVR16FBFRPB2nqyZeh3Z7BvG8AaQbjfm",
+ "mqKZhBAdnwRNu9zfSLhkFbunw7JDebw1KJ",
+ "2ShXZ3ABrVUgJS7x2BjnkQ76NAjqZMoAac8",
+ "jFTEEjkzbcEy3siZMPW6MNUvLfSfTHKbk7",
+ "2KRhnJRGzFQZk6b5JdXeSGnPoZ93FNHa8Av",
+ "28HC12dpjRYwuGYokVZqFWG4yb2PohDvqZ3",
+ "EngRrHn6H8XWibuC3umWVwKUZu3fubn5Wk",
+ "KJ2SrNrpVRu4k6LGHWXCtS7u791WyPZsxz",
+ "2Gj6RaT3GdnZGTLZhSSdq1YLftnWiq3Rn1U",
+ "2G82D85gXbTNrh3omKGo2dhU3QX19ahpPct",
+ "9Q9eAKKF6gW81Y1Lt1fCRWqxsy74LALLxG",
+ "dWf7P9kHNixeLXezF7ALSnyKRzv3tb1oPP",
+ "amStoiSTVCh6Macih36WCQjfesHrLfZwTC",
+ "2mZDB97w1uHfzDMZySeJttXnbT3cpWbfJim",
+ "2AVAusdbM7qx6hN9M4CMTitvoBjHaXdb6B9",
+ "JBnibxjtgNRSHRjniQbkWEg9CczT8vikkb",
+ "fJ9RcPoAZsfC5Z7xVNKV5K1f6p76kpqPB1",
+ "aNkM9Y9rzSnapxPwuJ8S2mPzSpoJGeAi3H",
+ "NTcWsoz4mEDmpLUqSayMHacDcMp6PbigCs",
+ "UARa6yBWSYAdutsFGUZksVpLzuvE8VRpue",
+ "hLCpMLHxfCqSDpd4d7EtGiTiu9eqNHVv7y",
+ "sZDwAoDuLNv9jgzVbCYMorNvsKYgyxDtwm",
+ "WBuffV1ZEnVxkxge1oqaKM4LxeBHhiXQMP",
+ "i4Sw4wKLk3rwuK84BpTkm3FaXt3Dd5Fzst",
+ "VQV2axCh8vUhDCuvQH6j718PxRNK9pRf62",
+ "27wF2RLDdzX2MQPexSVHqpD3YmgoWC6VSkk",
+ "22Q4YZpZbXiJnHCejynUUT2f2bZiaWPm5ZS",
+ "2Jd2JaBEG5J66ynwxLjvKcAQDWhAA8Zfy3t",
+ "2CZmbuyiNSBScu2ySM1zyHw2Nm4rnx1qA29",
+ "mR5Zh2qRosvdntUHXp9FNBaGizS37RrR3s",
+ "2CxZNSjcbwF84SaURg29Ekf7Q83dbkTWzdL",
+ "nifeEr5FMsJ8P2s9uov3JtEj6GtQAPVRYV",
+ "XzMY8DzegifK476oax5RCrKTrfHP1GvbSV",
+ "2UL8eo2pmRsZm14Wk5GEKdKhQXt4n7RNReo",
+ "2R3kJ93ydK9biwoW4wNytu9atugmZFfapZY",
+ "5SppvUZrFhDJr1HL9RK4cYWpkW5XyMa3T8",
+ "2YaKHDtcsBPDsruivjW3WU1bk4oWYysKWN1",
+ "hqqJyFuJGqraAdENHnmk3tjqn2KCCzNuCu",
+ "doWGCkfbZfPUwtwDkiTUvjqbSMPtF5iqRN",
+ "29p5vKRkfKNyRNzZezQ367dSCmQ2WQfCd8W",
+ "5v1WsP98EtmyFGRkV9eZzmFwhoKYoCUjxG",
+ "dJWPjQ9bK2Q5e4d1R4TN8ELmBvE9CtWUjk",
+ "df4G7zUS3q71hREM7gDeKs2Jdy3xMYaKmk",
+ "2kTitsnyqf3iFck7pzEcBexN2di5q626bGT",
+ "22EMNTEUr5rjgyuNdJDjf4EE9vu1UweXks2",
+ "XWewC5ys9aL2HTkXpSR9EePaeY9d4JqWY6",
+ "2CL9NnXUTJSUnT6rVkETjkajgTiKgxkpehB",
+ "1HdYyhcJ1gs34DJdrw92q47kGdVhJpqGUw",
+ "2hXC4pYWmYAH4pHm1QhdUWu9rAxWSDSiSYD",
+ "gZSgkE59bV8xq22wRKLL9cbAxAybekMVNk",
+ "nPeoMYwR1qcKu8uWkCUBvdtduEapgoJ6cr",
+ "2g6WYh3uDwuRWxdqb8dkQ3pA8DoJuVVepQA",
+ "1JfGCbVEtodwrh8ge1a5BWLCizftsKohpd",
+ "PL6T69QRxPEYhh57nVW5ZP2frgEDakE3yg",
+ "Ek4ZVJPkJcAYMBXmVK77VCxWWzj5dHcUBD",
+ "tDGXNZUk7NhyXVD91R92RPrC8nEX8C4KnJ",
+ "2S7nLTpD8oBk77jbmrK4cbFzjjb5ULfJutM",
+ "NcqCL64x9cLFupo2Cp8VBiEG4jZ6RkweDx",
+ "9EWpoRyT3LcCBx2XDcSVqG24p2JHjZdMQk",
+ "2YjHEM53twokKsv9vNgXCgZPx7yx2f7WUi5",
+ "oVxPm4zQVFB8Tn93fTjDySAwoBLg7qC4u2",
+ "2erUMeaiMxGgbSnGg8SrMtNSCFfKtL1RMm4",
+ "ah1EdcvTbnRiSQzmjmys7bsPYiELZp3uX",
+ "2fJHwfLQHJKarRT7aGspuNeD9foL5mGR8Bj",
+ "JmS8nQL5hm5GPZg2kdFhbKEcAAE61zrGEs",
+ "27vpLnkjnkbxCWCxK4E78SdT1xaSwQFrYFt",
+ "miqaYxDjRhHxiT9pciQJrkns9mbcpXZGPA",
+ "6BZ5cmEFMt3z1zty8zdMJS9fB72DeDb1mi",
+ "257ZuvtWwfdcUWpgguM571CLsTHMdAh8BSM",
+ "2PLGyxDpMuXkD3K6Qxoui5RQeJmTP3JM5aU",
+ "hSmBxbq2bS7Ap5VdkRiqZnopAAoZWYPc2c",
+ "MG1RMWzA8CzHMMLkDZ3M3P5QkPoEGMR4iY",
+ "4HPd8g5iSZ24vyAgsMWCczw2MntXEnjcn9",
+ "FUue9XroCvaApvgtC6vP6aFsS9raujbboZ",
+ "x3BRXMaE1Vq1B88ijxZcfGVyH1L3o3UE3E",
+ "kFt65AqGUcZ6BnYwxkg8ouLvMU65EfnpSG",
+ "EgToqreozYyjLoP1RwJbEwbDL6BxVGu3Hi",
+ "2c8Ma4k7SFFut9KH6zFJDj3AWm49JVwfSGG",
+ "2kjAKoUJaPuvT6LLf9MJHJsipaFeXxvZGFS",
+ "2ko72nZJLWF2uRK8hdgdpA8TwZQAa3XbKLY",
+ "bVTQrQD9nn6FU5EW4FGTZfhGsBSgNmAwRw",
+ "2AGXtMso9sm5P47TwYaVKTrRyK1SdeU3yjg",
+ "LhtGk6r77ExeewBzTMbBbb16mR5Y8Rnpbi",
+ "t47gigvmPnC9a2EdrfbLzLrcNdNUAZQyta",
+ "RPZucq2gsguqnbyAxSMrMFydaFnjtacE9k",
+ "2QJ92zVRimrktnLab8GFB7QUe4dvSUr3oBv",
+ "2buZakH1rVkNHmjxmfAfW2mZXEHncsL8eip",
+ "22wD4qBLR11JoimzcE6miwtgG3ngEbhYmts",
+ "aiS6DEwjHwVybB8eyU9kGq9SN3uVBxRACW",
+ "sPjLfyfnkBidqif8hRywECKUsaSoNb6Yhv",
+ "2jXw7Nu2DgH2crp5f5ZrYTGk7RMSAfSSGVy",
+ "295YKvNdC8r3qDwV2pHF7bRnFz4sX18rLqg",
+ "BJU2EkxAaxfxLQ87uoQtmk5MyzYXyE5395",
+ "2huiXTLXLt1nL1n6oaft9Z6hiLDB1CvoyZY",
+ "2e5741PuQ6t6my7pgJZajK4ZSWCQusw3zAB",
+ "27ekJKoVe2HoLeeBCuvTrwjFALC2QT6wPQ5",
+ "28qetYhMqtMX223SnCG84uc5z6Ba3o3kXez",
+ "pabLSpoZ7R3oD2Xb4S817USF75RdLA58wz",
+ "3xRRkeRJiTSUAAftxCucttZ58KctQhi76D",
+ "2XcuDdxW6HawGcDEUkJRkXvBc4ZdXofcL53",
+ "4o1a5fFaFQETj5dKNDGpFA6s6cYofQw3Ly",
+ "2ZWnE8qXSiitCPf1qoJBGKnsgTZnXZbBocS",
+ "2eMDQHRoXkf2HaE5jnpfwsm2whPhBUnZ8ev",
+ "XTutpLyat2NHDCqtNU5ZeEmsyNFf5BbV2W",
+ "7BMFBvJMMCna4qWGU4xA9fCBdc6du76tBL",
+ "tN9hPYbBqqWm6oywzCEnttYgevHt5EtpJi",
+ "2Vmv5mgessEsoLRzRVHeErhGh1BgbhUX9vN",
+ "28nJZQJXcyCXcRQL68SKpgigDgXxdsxKwJo",
+ "FutR6b55AZEbmpjBxnvw43SKgJ9yz8347Y",
+ "LBP6mi9rz4LkK5uT8KdGoZoBBWNQ4EcMXZ",
+ "2JUkgNUX6CbrfHA693MaVZVg2EhmsFjBCE9",
+ "dDurygy5tph6k6CbenucjHq2pMeDY7LxXW",
+ "8Arek1skSnERYfL8DJD2aoJoyArjwm8TBi",
+ "2hsoYhBY6XhfREDLQsJ59hQ5fbrVYxaSt5C",
+ "2fXEJLW8ryBb9wyZFyGAp7vEjNPjSnTMLo3",
+ "U6sW3LibpVwJM2UA6oCVMyNS1up9XTH9Y4",
+ "FL2igxLzxxe4v9DgtLnZeAPvcnimq9S2h5",
+ "pxLypSbcXC7mFn2aL9sW5w2bEiiwGsSAnB",
+ "PXoDpxstsNGarHJbYym42cQPxRUfrhKPng",
+ "kUtHWMzuSuXRjqDjCF7a5Bu8HM2Ea5eXUi",
+ "2LAfD9yJNeaBAdhyKmD6S3kMcegV2J7J9Xp",
+ "8ZP42ww8jAJRd1sYPaeGVN9XT1amDEDVMy",
+ "2anN6ZShHz1fsXPDKpyHZAzTjomA5Xcd1f3",
+ "LrjjNpBM9QAxKYCqtsVd1gBFFoNKKFHxrm",
+ "2dktQJfAcAHcYznAXUAySxR6KDyE4fSZDok",
+ "2RuYSz89YVpxbbJSiUzAh312hFSHgDgejbs",
+ "2E5E6krKnyCiMKq98RSwCMxPwSVVGcddL5m",
+ "c44rh2ch6zmBbm5vWbTQyDmTic98bn3GbR",
+ "6kzn1ehUgWAnf5rKHAa36nFTTDg2yuE4Ge",
+ "KBo4PbckAGLcGaww3Q68HVynww5obFgfUM",
+ "sVB21HAJZaSQYmoEvPzwW3MaL8XdfqCrdC",
+ "2Xx1gA93rtLhdC2nYdXHDFgoJ4vqMVEGUGK",
+ "2ht9CvsP3K9kJGxw6tvAVhQNYscwarbr5m3",
+ "VM5iCziZpxRvcoKht3PEp6xjd4iC4Ltq5R",
+ "zHpt3Hh9BnYSVDBQtjgrSTbxx6TX9NK22i",
+ "yQZxBn9cDU16NacqgqG1E1CN1bVPvFpHP5",
+ "26njdygvPE4qwZZ3Yj8r8Nr6kAPL1ahaega",
+ "XE9ZGsSoD9D6yyc1fmSWVvhymHtALc5gq1",
+ "uqXZWnWQi8924AnD14Qefo4Cqp9oyD1fdg",
+ "A3kW6XJAtocb3RZzxp1hhKUi7x3prwARst",
+ "hcRypWRZ95k7W54cDEQyhXbo3jQJhntSxL",
+ "R19BH293zm9LtwHUmYbwY1nRMkdiNdcjUW",
+ "y6vz4cEbAYLxo5YpuCATXmpBHHXp8baW6k",
+ "MVXpZvBBZ1hKXpoBb6ADEZ5PskhfmQoAY7",
+ "7nxQpV5JTLdET8BrS5u1QBKBdXdoi1n8uu",
+ "dNwTPXscUyB8DDDei4uhaFNVn1zwZZXSXV",
+ "HzqUKWAye8ELT2dFYJXSh7nxTEaGmKwwqQ",
+ "E7ecz6ByxYPH15qi61dZxVTnGVfiVSKXLz",
+ "PbrcqhsS7nxABkacLbS5D1TcGNmbQjNJvw",
+ "2AbQzSf3NJF24Wbz1Q8yoNnb14XkHQS4Skk",
+ "2feEwyW7kthjXyDA2PE3Qc8zK3wZiZmpHb4",
+ "2hujgC8BN6mm2ynJEWkUr9u9PRTmGpW4Quh",
+ "2ZQNF6xDTS6u3PdsFZkUUw92x6X2E3heG6Z",
+ "2PYEWoh9vtehcTiXk3aKbaU9MSKzCFmdEdf",
+ "VbAJiS94usC6xc2g9gC18B8jJ2KJgJtNdZ",
+ "Db9Psfwdn7J2JsnLn5PSVYkEP6WtMPp5H7",
+ "BTmPvxVKb3o7B3qP9brwomqcaWDaxG4JTB",
+ "2AZ8TkwAxiYE6dp6MmEdajMCJFT9mJ6SCsW",
+ "eCirqjde3tABttJq8nWQHhEtybfKBGiYRT",
+ "FtuE4z7jeWMMT3GGwyrzc49w6EfVpx5ACL",
+ "2JbSrKi8iLmGEEtAiaRXv5vjgf7hCDxvfae",
+ "26CWn2LeVZf7nGb6zS1T2Qq1sMbDMWpDfSk",
+ "WrNnqNhLJRaSg43oNRn27znvUL85Faj2G7",
+ "zvhWMuLG3yn4DNSWnZv2PC8KL7uf4zzA7H",
+ "2Vv1fmjXqwEtqm1sFmbVCQFEtvRR5nhYic6",
+ "nNEjceCZGZ39r1rEDkNNi1tvF1vFeHgjd5",
+ "qNuPoX1Pa2GTL4VTqtST5mFUH9514nCiYB",
+ "4aMDuFNdZLL8GUHwXynv9yRsDvkT1eYwbW",
+ "2BJ1a7MMZ4LyLJHyYmUDJXTz4BwW2m8NpD2",
+ "2g5HsThBpsWyBJCH3vt9n5sue8cY5F6ShnY",
+ "2ANR6AQVnBP1ELMF888y9bvMXBGotyPF27d",
+ "U4kMG3nE55vggZhY4g5W2b7LJFnri38nXS",
+ "vP8LuC5f3JsyuxdNnjQE4cYX1x1Jneq91U",
+ "MnedMSKUUcFWDH7t6wQCHUVbLpYCVz54E6",
+ "2bYCbfrHuKT1qoipEF2WCaqYF2VLiWVmStr",
+ "21g3DaraHoi3ErJPnqVhmAUhEKYjaMjUX7z",
+ "KycARv1ZHFhHjMQRi8USeT3fA1vfpvixg4",
+ "PeVNeU814LHqysUkWjBY4g6HDFAWuvWU6g",
+ "y3TRuV9sGEkLydFFeTUv63rbKmD8P9tdMM",
+ "Qk45V9xFiTBGzc3V5KuthUYvm4rwsdCcjM",
+ "v5DJHsHz9Hxk9qtjGrWhhBirDF6jxAQcw8",
+ "2YunxooVtNTGUTuU4nymP1TstdePX3CjgfJ",
+ "qcUNUmuC2PxS349R7Zn3f8rHs5Zr54cTeF",
+ "3oizXQ9A5Faaz2eUfL2ai8VSZQrKUrq4KZ",
+ "28AfnQzpVgh5PvrBBktYWQrFXxzeNoFeZab",
+ "jZ3aqq8Gpvqx6EEyufA9XbwAtaeo1Z4XTW",
+ "2KAjjMvCdY8hD1NpYJ3N4PoqAQhxJxsLW9X",
+ "2EXCRGeRVSG9fWMa8TAdFozZ51BM5wQQemb",
+ "26SrUQWBqXuSiLJtPpSuEHQAmKKrSopVanP",
+ "21TCNYspXtny11NsSB3XyHnmKhHMYb4kCQx",
+ "xHj8ZYniXjXxt5nKNP2SpHAS9CHecYPEyE",
+ "ooV4AyjEuTxDbGjRv9H28EQNrZamr3gkFs",
+ "2fG1VLNPHRHx2STRwmzRGRPYUMoy3crF1SK",
+ "7Uv8eKHSRSF2v529FQtdSiqj25KrwUdtQr",
+ "2m2zaBUFoxDisNJDvYAiHpniVzhPKXyHKer",
+ "FWZeG7EFVAPqPSuv726QNoPEkJTfMJs7sW",
+ "R3z5YsC9UdeyateCNM6cseofr6mr2YHiF6",
+ "2bEee3HGuCFBEXfo5ptYvJxauAX3Z491ne3",
+ "YUcseh4SwGq5s1o1z3Je1AxyvYxbHaXH1M",
+ "2FtFHEEjpUZABFnYUdxfwJgN2f8cNshQBNK",
+ "2A2tQEmDSVu4r5e4V8XpHjsU7sCUGKg3k6H",
+ "2iEtyRcCr8BJDjZPRdkhmjs9DPdVeZAXAv7",
+ "2aDtoXE8n8tqt74Nf1t4vewhKni96mxs1Hd",
+ "28aWLBqsAWvV9FZzWGyFZbvfCcbXASCtDs6",
+ "2TQa7jL32GxVjpDURjA7MA9ttoPh1LxPac9",
+ "5D2kD3VjH3uHRPZR1KGsDqq1pG4gg1L9pH",
+ "2jCh9sVAttBSApgpQbnXneWDmEABEcUWes",
+ "2HWkxMq5wse8eMzdSg7SFRGdL78dhCrUnQF",
+ "2USF76oow7qg2JYZevCso3MVZ6oiLrKajnT",
+ "2k4dhY4LoyRm8wNFvxpSYWpzNUdEmpNwAeM",
+ "BYPEpZi8MMavCg72qYgrjfamrMefywbNUX",
+ "EXqYUC9qGhkocaAS977mYiiHTzxcE5iLjP",
+ "YL1xTa11Q5P8W9dvDhotR7HidkEt8cqDTk",
+ "DcLFpRpQs8DnUvx1nButqKjsgrVJCM3gA4",
+ "tUyQ3qtuLXY16DA8ChmavF8k2iSghuEZba",
+ "2bSgdeeuQcACouNKCHr77Myc2UckCHgLwoa",
+ "2LuiyDaLRELuPVoAt73U5jxokVhUzQVTkY7",
+ "tJp2cDzzjc3thS1peTdHqojjoeJHQdmWVw",
+ "24AAqR7Z84r7nWBvDsCEF7WLKwPhNsrZTfd",
+ "QR9XKx2hcYwQZSn7s9uZdTpuPhCD3SPU8Q",
+ "euPtyPekuzTWm9MtsEYtwMSfw5X6vfV1n4",
+ "2TmLQvUuow1UAQ5ZQ648sJfMieajJFyDyee",
+ "r8W8463DSkaeMXsHVk31M3H6fKf3Tad1da",
+ "2P5bpaSiWB4PTu5ReLmzrU9F28Q3Nzi67FD",
+ "M6ptsP2BtArEYywwfs62Bm6wpjvgi8KUHf",
+ "2STvXAePrTriUngyEB2FXgHyjoJjovDn6CL",
+ "2h8SZ7HZqChSidBXBR7N1KFRBrM9MgQxhkf",
+ "2daoffo1BrUSQvySftTVngVk1pRXFwmUjzd",
+ "QyipQphGo6eMWZJcGYXjN7tVNNRKXXGJfZ",
+ "zHLNxJnUiW2Xu7TQkXodaGXVe5kcPrrjhz",
+ "fbGDKVQwg8b9RZmmA7ruce82t2LNvPtQ8G",
+ "2CezKLkxkZk3uQs5k9YrunQ77GPVhz6xWXb",
+ "295jZueDnavv4qNSN3KFbYME4N35hirB2z4",
+ "CFkEWjxcuV9HP3QFQbozd8tiPq1UUNBVk9",
+ "uRJaiydoRpcEVMMDPaLC79Udh2kYLeWtbm",
+ "3aKVUkAw2uwD7yiVYoYpRhu81kLTySHpNX",
+ "3WiEk7ZmtwteTTuytNMf2xTr4Y2zcYc6SR",
+ "YFhB65fLnp23BdvCcWuZcBegQ5Gts8kbUH",
+ "2RQaD4ex4LBKRSTAYhrXnZ4w7cMguM3dMHN",
+ "sPdiieYPLeroaSec3qmuAmP2VrZp4YfvxD",
+ "29bB7wKVhKhBYpLB9HLt1ehbVRDvCjkxSXp",
+ "DvXK9casvN59sojtam1rYVVENAgKva1fC5",
+ "BRcWLz687gDQiMoGzdH9ExNXgjUadhxDbv",
+ "CfXTPXYceco11bEZ7WpwzXsu9i4buiGHdG",
+ "tvwyK36nRQUbt1TJhaNRKWnTYJLgHi9eGn",
+ "xVyUggaKkYhNiW9bEaztZ9epKY483kbn5x",
+ "2dWFKMC2c3oUksE8QmRWwo8K57iW1SPTGXs",
+ "MLVKyKDSbirCranKJFmiTnHfdHvtf4MaMb",
+ "27vLouPtqdFmrCkECctwAt6zQ1wpQ3vbCJu",
+ "2aScLc7V9im4HwnUa6FsXcNFRxMYZ2i8evw",
+ "c5hqkgrZCN2U3DdpXNaT1mghv9qWjLJqoE",
+ "2k3hToJKR62vQbNEHcj8FFn6ttgwGyDRAA8",
+ "2EiqFfDYciqBFcHKECQTYAajppNq14a9DKn",
+ "2N6awRsijdC6ZSrGsZStoc4n6vLtTuSFi5u",
+ "2dp2984zEz6RYFziJo9oGmiL9rNam4umhv2",
+ "2aZoaj5o1Ws94ciu3RGamaow8yotwk1QoZS",
+ "32jJQGued6qthCZGJsjeEyErVRBXW8YERZ",
+ "2k2vekekt5fZGkJDvcuL2y9npEsNGRyo8tx",
+ "Ht65zgYDWE2KLUWxKjVkeJx83pS6xGHZwH",
+ "W1i9YxYwzwDS3wG2RwnV1cERbs7ZTNyrMn",
+ "2gRyTf9pXQigUxgcn3HJT3fk9E3ur1vJkX1",
+ "GRJrCk8QMVuM4CYfFiLM6v5uj1kTA51uMF",
+ "fS6d7x6FGDsJVNsqgp6hmDDY7zXHFvLDVg",
+ "fbB3oweWooiSniSPN2Uhnkk7GQmiD54FUV",
+ "2eGzpSwFLVxs89eCKDuFXnRK8PaPr65GDD5",
+ "2AZ5xbx8tRVyFW7VUHw9uCzfFQSAtFwcU1A",
+ "2ZMrWv6PDN3wu4ar46WoiTqrHwrKkY7w4m8",
+ "22GyZB2fGxVsVkHstTNCUNpSt1ydhjfqwTK",
+ "2g81ii6LVKj6vpcuupEcZV9zuPVoxVgA9Mo",
+ "uoCGmWNtezqosyaKY6kP1iw1p7xfDG16wH",
+ "2aUzxeDp1iewqNjr72QwFB7DBx6ZvgTUEjM",
+ "22fDSGFdhti7Py7zykMqbja48SVkQfedsBA",
+ "ixRXVfn6hMRUsdsJTE7GgMm3Eid4UanKcK",
+ "C4HyC45crxL2Dy3va8PFhJEayicUSNcmPF",
+ "2TEgtqLSYXH9QVAU3H3NsY29begjFLs8iFi",
+ "23tWavj2UgBg1Jsk3G8Ns1HeYoGkqtNFis2",
+ "hNHihCGa73WtGv9cfZFrbqorAH3pvtcuuZ",
+ "2Lwtn55kqb9d6BNvD5ecJW7yBfZTxF52rhn",
+ "2eMfihWbmnvZ4KvSJPbRiSTTL3oFv3nuMTL",
+ "2JQ4BWC9BTHUUSHpd3Xc8S34xrenukdUSUv",
+ "2i3ZhPSdnp8NswPsw2p9883cfm4zqUWkMFN",
+ "44uk8fU7k2gFqerwkAKTFzWvwPayWmjTCx",
+ "2gDeQvfwJnmeDVtQt516hUbxUK1xTZfraay",
+ "2GtoNJMcB3RBxvNq8qAo5xvZiFHSJ7dGKi",
+ "289R19783EYott6pMQAPCCA5MkKpDSrUBoN",
+ "BFP3PK4WWqfD24dLp45tvGWWg6RznWRF9X",
+ "2mPjq2MP3jDs9hfvjJPewuWui4GurBUmcGV",
+ "2Mfb2mini3tccBzstfLvbjXN8a32pv55Gjq",
+ "eGmq3kfnftYqhv8igCHdBfYPfiSJyjL1AT",
+ "hMTQToHnS8wyu9mmt6KtZqhZ4JxSJrbGxh",
+ "VzyfHd1eos6Vsmhikpr2iZPHtdzMKSYM5X",
+ "W9Zgr6oM5baXiQSVDFCemfNpoKCn4Semhj",
+ "hJtq5WKgFE7xWJmoHwhqi9S297YwKNxpSA",
+ "86kvVQHJQMZrNBHctbqmuWNvRemU1RhL88",
+ "PGgDzvKoVmK7MVy1r9gFPKNRMRAjAEhGAg",
+ "2WawfhEctHJCXbX71vXg3NyECnZYmjpHqQS",
+ "2BBtShwjUwPD8g2kXABNo8fNfNB4zUL1x9M",
+ "EnXRAsuNMnNPjXE9zJDgamtdnjAcgE6qB5",
+ "bRMDHVuxgK7nVWRrHbM8m2387KS47C2Sdo",
+ "HMM8ocaqGax5YqB1tgx8xxtFiX2G99AvGa",
+ "wqMj7Cdd5AffBtNbd6hT7Kp8XvmTB8w3wz",
+ "VTJeVgM7vdHCSCUZftuADNgEubFWaBnmP1",
+ "3UVQ74z9hLVxV7hoghseqyKgGcok7uR2YC",
+ "2cF8jAYUp42i8HRJPu55GwFKor1Ur5UQt1i",
+ "2FuLm7pRUJtmDox91PYhLjuk1VX6CRYAdSB",
+ "2g14ENUNkP77iRgT9YNZsZvmg3MAToEe2iP",
+ "v2L1JbbYFAZdVezpZdw9crruveeaYNmmqD",
+ "vwT5HtduJmX8vPJCEdKps8xm1EeXdSP4qR",
+ "EJQdcnbL2qRD7DSEFLtKYntAKGuSkvarZS",
+ "jpotWYe8jjXErZZgYjbeRvVjav8Wi8Raqs",
+ "QTcmdbXUHgqAvnm6s86i9gXX2YKc87Bcko",
+ "fDFrEE4eZJ7RKSj2zMLD5b9FycuowTgq1p",
+ "zg1vUUgNRKPHua9Z3h2cjrComgyvEfzAjZ",
+ "2FPvP8Yv1JJr9ufeZWTg3Lf1qfBgTGatMWz",
+ "jr4ZwmRgGFSD4ZH3hNhZx3udHp8LdvanAf",
+ "msZD8PQsmt2fWufG68VGzyRRi1qvpsJdQ6",
+ "2YeyFYSEwvA4Zif3US662Wmkf6VEsJHKJ7S",
+ "DbNKRPLiiyqMuC75W83wzMsrf87Wcvjt8Z",
+ "2jS1HqNnnFKiPsitjFmMgZsSJ9yxXy7ZPGW",
+ "2FE453QFdNnjJ8RMA7QyxtsFGpi4wH2q2i3",
+ "25Sf84m7VBrdMQK1kLDF65ve22bEX5LGxua",
+ "xU4LQ1vRi1dyYWFseNnPFWyJeYMtohyoLZ",
+ "Ms5uBZoBoDxH1iFGj9bGPo3kpEqSuLR9Z7",
+ "cngBTW2gGvUcGkxtixnRgJ1fnj9GrABYSg",
+ "mSWApCCUJgTpT4HtABQDYSVupEd4UoL7xf",
+ "FjzvUD45SqbN2Zv5xRGWN3SNgLG3B1quPB",
+ "25ddKzdgp3TmpoEyqQTPc1yqrYfmcsJqXac",
+ "2gA5kcj8gx3b4zhFXN28JHDG5obB4wM2ZJC",
+ "2exkzZ39ZXx2Hvt7XMAbAGoMb9kRetQC9xN",
+ "29VCcYNC1soY2RnyjjG2c1FNhYcxdPPmDwV",
+ "2d6CABNcvWznWgxunfjqt57AD6N5VjbtUnG",
+ "23kXjScid5HCyfmvtJZ5pdpA3PucTCxiJoj",
+ "fFRZH7xwVihKMGiDUSUoWZMrvtQ9j3awxX",
+ "GxFd4XqpLQQWyaH6Hbtt1piCcXJ3rqdjZY",
+ "24iVPSDSRLktUj23YdXp3531s2Dbd4XvNpn",
+ "dQkEJxUmR9MRYKz6iVRa1ms22q72nYn4iL",
+ "dnjXVAWJv6Gt58oNTtHD63Z77uVkRfAHos",
+ "qckoWihwox9rv33CE7e54dgWLaDY4itqrf",
+ "2D2dkjUMVgiR4yU75WvedcKvQSmwCj6W6qk",
+ "2BvfGqZYwyFaXE1nvzsJ7Pei25x7v4d41yR",
+ "wzxU9YGrkjLFuX72F2YdbsRm1NFR5D5CMF",
+ "2YbDyeXPvC4bHMVczmgQbvY6gJUYpxVRkB3",
+ "dFVDtdMt6R8ZyALFiAvBKTTCqKBsU7E3DF",
+ "2fuX8DA98usnxbChNFLQV4iLE5upuWup6W3",
+ "j2YAkSPuE3pRw5jEwozATxZGPdYcETedj",
+ "wsBi211Y5PXahQaxrwKU3P49ZhQbubeZVj",
+ "2Bh7Dh4JsMR5wNttAiFtfoK8oBcbGLt8nDg",
+ "21cxcry6r3Y8UU4dfQUJ4bNqtgf5LWZNX4b",
+ "iU9oD7yPQPzxdacx1GCuaypoyUDDVUe4rN",
+ "2JgLq6FqPbKiQgM8u5JpgcgiQzn19psNfgn",
+ "2aSVoqpBng4KFqKQtsQVyCCf5zKqH7dWHEY",
+ "22Wi1Fno3wNCDFnZfQjDNoEbS9TDpNNq78H",
+ "Mhvb5fHdboaap923fJxtGXxZEojtY9VNU4",
+ "Pe6tpFpqsEu4m2o3FkDmDpHU6w2dCB71fk",
+ "2f28YxxZCAGkVxkr6DVRWxTvVzKue8HPnSk",
+ "nqAJNrLP96e1WJofgHQJrtSA9gTpM5L86v",
+ "2XD7UGAGJupLTuk1Yj22iQ5CwBEdykMBsP7",
+ "LKa5BryDBB7ug2dZhrncoS8dz2cUv5K4Rb",
+ "N7RTfu8LMhDEQS3eSeJjQzBuHU4Yj3rLwd",
+ "nr7MwYTRWKKB8WFshUmfsDkQrjrax2H4y7",
+ "2QocRUM19a8LVT7CnjoR6HJWXntXDJsmmDF",
+ "58FY3KLFKkEBAWna8nH2oquov9rCyY5QJ2",
+ "2MR1MzzQzfD4AUCoXurnjdwxwhgh2mkcSwj",
+ "7uHb3zmZTmSQeSz1HARZgZoDErWuDhhXBA",
+ "icXsPHrk6NcLhUQAyMxZTFRegNLqbkajkp",
+ "XnsKrf9qkQnZAD2usiUvF8aKCXhVb5Da7d",
+ "oEBRgWgDt2GcfBCcWMEpJ8RBctd9yFuGAg",
+ "24RGKPsnsQCiVkes14R1RdDEhscy7meGA1f",
+ "2hyLKdVomKv2Tko8hEeQMkDn8HqaeauThcw",
+ "2GMUDBMXe6hepJV7uefZwGYiB4invamLZ87",
+ "2NSk3tJpFCuGybSR3bNngirXUeykPfVHr6e",
+ "TtBZd52cZhhjdJ7t5QXUnPXo3eYkQVuUuR",
+ "4rkTmJ82Db8tMxb2yZQtAboVnUUUBaBy6k",
+ "rqdH4MwHbFHUVUc2692jXtdXZqcAhxjmkh",
+ "2Ra18ScFZ1wdv2gTq589fQoYDfCJR74k7HR",
+ "JQz5ohVZEt8tZXTsPdMJE4Cpgb6WdcH9um",
+ "Ckdc3PiH32snnm1PFMwcjjEbLn49evTU7Z",
+ "o69vXRUxV8aX8w3FDauDjeVd6RcDcaxV51",
+ "2j4DCNfztT2cBPBdFEaf1Unk2HjYRA7o3bs",
+ "24Y8B4NsqZo9PH5GSeFqWwRaKDcwSpRwX3Z",
+ "htTbpfjWK8gm2wmpy4FRnLPSTpe9BeAVHH",
+ "SqGxx6t6iqn8AZsf14mUBmtUT11A6Zq7tZ",
+ "2QBb8ikJsYv3WdrFHjTPNF4PuLgwkSgqvPf",
+ "2Qb4D16HgVzNndycVjZXRrQyYzdEe5f1kEt",
+ "WqEbGKqH94Xtkzjh7pGWAK5v3UfspRUpfg",
+ "5aGZPV7q3vowUbhS1VAhTCXtouYc4o4srj",
+ "2SgENwkuMyKh2s3wdufjWg3JW3sCSjwFaRr",
+ "2W3k2kdexCsayjWpP9wnK7JF2Khw61nvhGa",
+ "4KYYn3BErgD5eswHs3rsEQZZWpsVYkXHUX",
+ "mY1ZbgmxqvP1JFqdq4YzC31SQNd8oTGw2c",
+ "qC6deJVWhZqKPbRYW7JXeiUNf8RjrAZsD",
+ "2YKjavRXaqwzauLALMV3vECrtjBJXVd2CRX",
+ "2A3uXa8GWNomPRJmt1KspWpTuYgUJbySkwx",
+ "2XJe7KXzxUoHXYdAnhkvDTr1EJU1xqhP85Q",
+ "NfaqAXcAAri6fjf6PMZGMugwiwiYHY1Q7z",
+ "2T6Qhq7uWCx1YguXya7weFtgtChhaV29bEF",
+ "22xPFCd49usGrtNyAbcsmQtyCDSUd9f7N2U",
+ "pPsLH84x8r8ZJat1M6cBMuh1jcTXnq3sLf",
+ "kyACjo6QicoctHQ7AyRxTCviFzqJKstxBK",
+ "2TZk4CMbXby81a6hajgeQRQoiL8Kw8YmUuw",
+ "Ap3j7fzSCDYeLeaqDyRhngsaZbvX7GwyRs",
+ "UvDfq7gGsCxVwcDGag3kjjT5g5AQht3J32",
+ "xKZWXzMkoHLyGYRhVpRtAmbJU9qNqHiFp",
+ "KfUZUpLCAoASRtSgeFqeieYBGFBNLTQJFV",
+ "2eit8zm9DPNrnhoHy7zs8Koz6Jg2TpS7sLu",
+ "2cJEWMdNjZaTWttc3yqsNkTaMyQcqYSinyu",
+ "BQJRWm6W9zFXMKaqAR8Su1TWXNvyN8PRVb",
+ "dPpQ91cFJJHGLCnK73re86f3dZcvUbsptA",
+ "2Tn7ndbNiTtAdcRYv9YdVYQKm9wzSe9BjP5",
+ "dzTmuPoa7uiwtZjTmyu79f4UgniD9mFsVR",
+ "229kyj2TCuMjjojymiTe26qxguA1BkZwSoZ",
+ "2bcZkbEbtUSe6J18F2JXh3uCJ7sVNg2UmVR",
+ "2fu9DukFeEyfGVkAfb8iWBFy2mDghVQUcj7",
+ "2fxkpW9HxwHU1bC6z3dzHTb6gEWzoy8Xs9L",
+ "2VvuBQEJBCTFRgJdHWUxgLonqC4QpnsfDaf",
+ "24vAY9PQ7Qi2R8WyS8PNqPoE3ao9GoHPRZb",
+ "qkv9hF4p4pBTtx9d9gZsE1umdV62auJw51",
+ "nqZxyANHHZRGfMCX1piVVTxNpyqefaKDYK",
+ "yXWPxHDZ9H692aXryEcCzmvj1fq9m9QEe9",
+ "5eryPdnvguPh6RMt7L9ipmiheNqNfDKgFE",
+ "235epnEHmGYLDUKt9VkUmtJZhd3WMYUEpNB",
+ "21MiQWFa8aPHmFD8g2SK9b6V3in6rPmk9ss",
+ "sGKq1txePv5iQRe5rnPLVTTKttCs3zjmDz",
+ "2LvUuM9G6TamL9RuVxvyvGVqjwHiiYBRotZ",
+ "2jZGHc1LKCie4agCG71cZYe8Kq5611fLfAL",
+ "21YArvzZEMNxN76H7S2bbdUDA7ZXAXoeGqr",
+ "sEDoamsSRKqRZtGf6G1GdXh7vR58UvzKgC",
+ "2bWPKVnhPpAM7gn1QUoyJNwwSvqBNMh7GFm",
+ "2juNZeS8hvQWVtgBtAkdyJh9oZgmaR4u4LA",
+ "2jEMQeDGuNs9Cjjbwnd6zfX9j5Cgk78qZQz",
+ "2aZLbTLJNvHN1KLS6bfedEYqqLk1NedrqQR",
+ "TexLgwbxQtVkZreqfd8ts7MgZXAxsKhqwa",
+ "fQ71HRkSJasCCMVJmAevQa5fpanCuoegUh",
+ "EQLWYk8bzDzVpXGHeAbs1EkS1bxDyStqJq",
+ "rHimzsNiYrjTDi2eyAmMVE9BKBNqwBzv3X",
+ "2cieRLhQMY6636tbZnwfsu2B9pxjJieh6u2",
+ "21MbG627zkijtP9EfWvdqPytiP7E8gZs4e1",
+ "2PuYo6dYwkNKUSPEPi7NU1Q4Jseg3sPYc7k",
+ "2jY2JbZJWzW5cF5uPnS1k1CXgQf1s8dkbdt",
+ "2RH3mjLtsqkiCAg6cauiuFaxkvosTBq51WM",
+ "Lm72QmciFF3gvn3XttqFPQroPiaC2j4qnQ",
+ "2XEMsTyFd5DLA4MHaY67YLpxKUxMeRTmCKG",
+ "4AM2uaS51GhjomXqtDkQtovc1jS7iMRETG",
+ "JMpqpGE2eykKCWishNiCXXaN4agU5zpw2C",
+ "2SjQ63GgZaMy5ChNipekx9nC6B61Z6zyhE7",
+ "xZryS9tNPZDcdZMFgtdGEWbzvrURYMHgqH",
+ "W7GgbZWJLV4z1mWxyuYGBZabgPCfvRcVAP",
+ "Sqsyw19LsXHw7jkTHEANmc9sepQoNFitCo",
+ "2jPE5oFdGbWtwgmZxTt6uhDyHVeod3yphN6",
+ "6pUf4J3qUcQcRQ6Zpm4FD2XiYksWhNLVS6",
+ "PT2XJ2hzoEMEMNrBoy1Cy3Kp5nxyqygkh5",
+ "96NQ8YvrHTU9y41iUYMqM1Epk6JwwQcmJu",
+ "2RL2yhBwN5WreMkchJrn2vDDWJHQcRtEaFt",
+ "2gNoXa9m67NoaTy6eZ4GMpBmvdcQ9nriUop",
+ "G3n5VmgdwuwX8yk9cWpPEp3FHKEey5YmHa",
+ "kfaNrSPWjRAepn7QWtdEMABi96MsihhFnS",
+ "EHUQJ6c2wsSbAKB6ZcxVLWVf8Q3sgNUx3E",
+ "84wW1qSfVA5g8T2CiNTdxJ4JqfevRghgpU",
+ "2LTs4BuSTWjHivxPuUP41w2PA71u46dCz2p",
+ "ErnM32JwBMDYJSKNRM9Y8JtWoYFZ6LnnhY",
+ "5Tw8R7YdSBTTVj6xhQNi9GZ1g7MR97inE9",
+ "2RDKpGhkpsPfnjE63BdfhrBvkxLuSipQR2m",
+ "2LAtjkk4wS1npFGEdcTpjgtNBmSS6u6ArUo",
+ "5bRNsJqBiF5gvC5nQnqZ74nvcKtDCwtPGU",
+ "j6SatM2gF3R2PQVMA6PUPu4D3ZbKQJCg57",
+ "2iA7RoAcMhrtfJvmqM9azcmYYrJXXXGpxd8",
+ "27QyjoaHdbBYqUTXYYB1eFbUL66SxYpSs2u",
+ "u4yJk7eP9oPA3GYRaHwurA7LMsjVx4sBuK",
+ "fBnytDvmJgRrVJfuUDmzKoy1NoQ3m8SKQz",
+ "2FAS9ewd3wfjAyNhYuXasdx4udC37mcuhCZ",
+ "6u1DL9SoPYKGp2ighH5rEgam1F2QRumj2T",
+ "2ZVj2H1qdvZuBymu6oxR16y3uAfas5AiVUq",
+ "2WhMD7E3AKesy4nUMMEp2CTrmE9VPukdyAs",
+ "227F4gBrr8fjR9cG4MAjnLb8bvoXTRe3ZWU",
+ "2mft21a6b9xMdXAAURPSoQNaLCsmPc5hqZg",
+ "pyyySNdtVfDm8gv2TNhhrEgdNZgmMat5bL",
+ "6MfnPVFmhXFJKpFgqgjKpfTBGCaZ2ZT42T",
+ "jNQSAN3UfGdfTECx4ZZvLTYGhEX7ZyxsJD",
+ "2g4apJrWacF6mwKUnvzt5QQV9W2Hmb5NknG",
+ "2MrL5PtWHqhsnh2oRnfLp3db7eCQTFUitD2",
+ "92dAsJKwHqGbprAT9EMXjFc373KpotysZn",
+ "2AwGo5UKYbKnwSg3B5aRzrEqNW7weUpYver",
+ "8ieRD9NwcKpxMYz6c7kQZ9Zke33cbzHaew",
+ "21zAWa4MmzNW1oCaDdpCo5PQDgXNiiAswD",
+ "6cJBYvqCfzaegYugFKbLgyHrrfYKo1X5Xr",
+ "ZGDXhCBa4BguMuqMfimsYXThgY3aQU85oR",
+ "23DeMFwzFJdBNhvQEzSKpcJMtbdQ3SbuSq9",
+ "SLzPyGEMg5U7piPRjVrvpTqUP8w2zaYNQM",
+ "2fzxDPhzcLVNf4ZccVuAHqMxYBEP4km6mr",
+ "FUqsNrZpKCY8uXL1yvUhycbXuDoWmdG5ZX",
+ "YL7KAnjsBPQHobWAZzPq3E9MSzviErY6RA",
+ "YxMo7jCkihzzMR3KMPmWkHhK5464B2B6tC",
+ }
+
+ for _, a := range testAddr {
+ t.Run(a, func(t *testing.T) {
+ testEncodeDecode(t, a)
+ })
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/bip39/bip39_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/bip39/bip39_test.go
new file mode 100644
index 0000000..26505a0
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/bip39/bip39_test.go
@@ -0,0 +1,553 @@
+package bip39
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher/bip39/wordlists"
+)
+
+type vector struct {
+ entropy string
+ mnemonic string
+ seed string
+ err error
+}
+
+func TestGetWordList(t *testing.T) {
+ require.Equal(t, wordlists.English, getWordList())
+}
+
+func TestGetWordIndex(t *testing.T) {
+ for expectedIdx, word := range wordList {
+ actualIdx, ok := getWordIndex(word)
+ require.True(t, ok)
+ require.Equal(t, actualIdx, expectedIdx)
+ }
+
+ for _, word := range []string{"a", "set", "of", "invalid", "words"} {
+ actualIdx, ok := getWordIndex(word)
+ require.False(t, ok)
+ require.Equal(t, actualIdx, 0)
+ }
+}
+
+func TestNewMnemonic(t *testing.T) {
+ for _, vector := range testVectors() {
+ entropy, err := hex.DecodeString(vector.entropy)
+ require.NoError(t, err)
+
+ mnemonic, err := NewMnemonic(entropy)
+ require.NoError(t, err)
+ require.Equal(t, vector.mnemonic, mnemonic)
+
+ seed, err := NewSeed(mnemonic, "TREZOR")
+ require.NoError(t, err)
+ require.Equal(t, vector.seed, hex.EncodeToString(seed))
+ }
+}
+
+func TestNewMnemonicInvalidEntropy(t *testing.T) {
+ _, err := NewMnemonic([]byte{})
+ require.Error(t, err)
+ require.Equal(t, ErrInvalidEntropyLength, err)
+}
+
+func TestNewSeedCheckingInvalidMnemonics(t *testing.T) {
+ for _, vector := range badMnemonicSentences() {
+ t.Run(vector.mnemonic, func(t *testing.T) {
+ _, err := NewSeed(vector.mnemonic, "TREZOR")
+ require.Error(t, err)
+ require.Equal(t, vector.err, err)
+ })
+ }
+}
+
+func TestValidateMnemonic(t *testing.T) {
+ for _, vector := range badMnemonicSentences() {
+ t.Run(vector.mnemonic, func(t *testing.T) {
+ err := ValidateMnemonic(vector.mnemonic)
+ require.Error(t, err)
+ require.Equal(t, vector.err, err)
+ })
+ }
+
+ for _, vector := range testVectors() {
+ t.Run(vector.mnemonic, func(t *testing.T) {
+ err := ValidateMnemonic(vector.mnemonic)
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestValidateMnemonic2(t *testing.T) {
+ m := MustNewDefaultMnemonic()
+ require.NoError(t, ValidateMnemonic(m))
+
+ m, err := NewDefaultMnemonic()
+ require.NoError(t, err)
+ require.NoError(t, ValidateMnemonic(m))
+
+ // Truncated
+ m = m[:len(m)-15]
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ // Multiple kinds of errors can result from a truncated random mnemonic;
+ // don't bother comparing to any specific one
+
+ // Trailing whitespace
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ m += " "
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrSurroundingWhitespace, err)
+
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ m += "\n"
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrSurroundingWhitespace, err)
+
+ // Preceding whitespace
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ m = " " + m
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrSurroundingWhitespace, err)
+
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ m = "\n" + m
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrSurroundingWhitespace, err)
+
+ // Extra whitespace between words
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ ms := strings.Split(m, " ")
+ m = strings.Join(ms, " ")
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrInvalidSeparator, err)
+
+ // Contains invalid word
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ ms = strings.Split(m, " ")
+ ms[2] = "foo"
+ m = strings.Join(ms, " ")
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrUnknownWord, err)
+
+ // Invalid number of words
+ m, err = NewDefaultMnemonic()
+ require.NoError(t, err)
+ ms = strings.Split(m, " ")
+ m = strings.Join(ms[:len(ms)-1], " ")
+ err = ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrInvalidNumberOfWords, err)
+}
+
+func TestInvalidMnemonicChecksum(t *testing.T) {
+ badChecksumMnemonics := []string{
+ "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon yellow",
+ "chief stadium sniff exhibit ostrich exit fruit noodle good coin coin supply",
+ }
+
+ for _, m := range badChecksumMnemonics {
+ t.Run(m, func(t *testing.T) {
+ err := ValidateMnemonic(m)
+ require.Error(t, err)
+ require.Equal(t, ErrChecksumIncorrect, err)
+ })
+ }
+}
+
+func TestNewEntropy(t *testing.T) {
+ // Good tests.
+ for i := 128; i <= 256; i += 32 {
+ _, err := NewEntropy(i)
+ require.NoError(t, err)
+ }
+ // Bad Values
+ for i := 0; i <= 256; i++ {
+ if i%8 != 0 {
+ _, err := NewEntropy(i)
+ require.Error(t, err)
+ require.Equal(t, ErrInvalidEntropyLength, err)
+ }
+ }
+}
+
+func TestIsMnemonicChecksumValidForDifferentArrayLengths(t *testing.T) {
+ max := 1000
+ for i := 0; i < max; i++ {
+ //16, 20, 24, 28, 32
+ length := 16 + (i%5)*4
+ seed := make([]byte, length)
+ if n, err := rand.Read(seed); err != nil {
+ t.Errorf("%v", err)
+ } else if n != length {
+ t.Errorf("Wrong number of bytes read: %d", n)
+ }
+
+ mnemonic, err := NewMnemonic(seed)
+ if err != nil {
+ t.Errorf("%v", err)
+ }
+
+ isValid := isMnemonicChecksumValid(strings.Split(mnemonic, " "))
+ require.True(t, isValid)
+ }
+}
+
+func TestPadByteSlice(t *testing.T) {
+ require.Equal(t, []byte{0}, padByteSlice([]byte{}, 1))
+ require.Equal(t, []byte{0, 1}, padByteSlice([]byte{1}, 2))
+ require.Equal(t, []byte{1, 1}, padByteSlice([]byte{1, 1}, 2))
+ require.Equal(t, []byte{1, 1, 1}, padByteSlice([]byte{1, 1, 1}, 2))
+}
+
+func TestIsMnemonicChecksumValidForZeroLeadingSeeds(t *testing.T) {
+ ms := []string{
+ "00000000000000000000000000000000",
+ "00a84c51041d49acca66e6160c1fa999",
+ "00ca45df1673c76537a2020bfed1dafd",
+ "0019d5871c7b81fd83d474ef1c1e1dae",
+ "00dcb021afb35ffcdd1d032d2056fc86",
+ "0062be7bd09a27288b6cf0eb565ec739",
+ "00dc705b5efa0adf25b9734226ba60d4",
+ "0017747418d54c6003fa64fade83374b",
+ "000d44d3ee7c3dfa45e608c65384431b",
+ "008241c1ef976b0323061affe5bf24b9",
+ "00a6aec77e4d16bea80b50a34991aaba",
+ "0011527b8c6ddecb9d0c20beccdeb58d",
+ "001c938c503c8f5a2bba2248ff621546",
+ "0002f90aaf7a8327698f0031b6317c36",
+ "00bff43071ed7e07f77b14f615993bac",
+ "00da143e00ef17fc63b6fb22dcc2c326",
+ "00ffc6764fb32a354cab1a3ddefb015d",
+ "0062ef47e0985e8953f24760b7598cdd",
+ "003bf9765064f71d304908d906c065f5",
+ "00993851503471439d154b3613947474",
+ "007ad0ffe9eae753a483a76af06dfa67",
+ "00091824db9ec19e663bee51d64c83cc",
+ "00f48ac621f7e3cb39b2012ac3121543",
+ "0072917415cdca24dfa66c4a92c885b4",
+ "0027ced2b279ea8a91d29364487cdbf4",
+ "00b9c0d37fb10ba272e55842ad812583",
+ "004b3d0d2b9285946c687a5350479c8c",
+ "00c7c12a37d3a7f8c1532b17c89b724c",
+ "00f400c5545f06ae17ad00f3041e4e26",
+ "001e290be10df4d209f247ac5878662b",
+ "00bf0f74568e582a7dd1ee64f792ec8b",
+ "00d2e43ecde6b72b847db1539ed89e23",
+ "00cecba6678505bb7bfec8ed307251f6",
+ "000aeed1a9edcbb4bc88f610d3ce84eb",
+ "00d06206aadfc25c2b21805d283f15ae",
+ "00a31789a2ab2d54f8fadd5331010287",
+ "003493c5f520e8d5c0483e895a121dc9",
+ "004706112800b76001ece2e268bc830e",
+ "00ab31e28bb5305be56e38337dbfa486",
+ "006872fe85df6b0fa945248e6f9379d1",
+ "00717e5e375da6934e3cfdf57edaf3bd",
+ "007f1b46e7b9c4c76e77c434b9bccd6b",
+ "00dc93735aa35def3b9a2ff676560205",
+ "002cd5dcd881a49c7b87714c6a570a76",
+ "0013b5af9e13fac87e0c505686cfb6bf",
+ "007ab1ec9526b0bc04b64ae65fd42631",
+ "00abb4e11d8385c1cca905a6a65e9144",
+ "00574fc62a0501ad8afada2e246708c3",
+ "005207e0a815bb2da6b4c35ec1f2bf52",
+ "00f3460f136fb9700080099cbd62bc18",
+ "007a591f204c03ca7b93981237112526",
+ "00cfe0befd428f8e5f83a5bfc801472e",
+ "00987551ac7a879bf0c09b8bc474d9af",
+ "00cadd3ce3d78e49fbc933a85682df3f",
+ "00bfbf2e346c855ccc360d03281455a1",
+ "004cdf55d429d028f715544ce22d4f31",
+ "0075c84a7d15e0ac85e1e41025eed23b",
+ "00807dddd61f71725d336cab844d2cb5",
+ "00422f21b77fe20e367467ed98c18410",
+ "00b44d0ac622907119c626c850a462fd",
+ "00363f5e7f22fc49f3cd662a28956563",
+ "000fe5837e68397bbf58db9f221bdc4e",
+ "0056af33835c888ef0c22599686445d3",
+ "00790a8647fd3dfb38b7e2b6f578f2c6",
+ "00da8d9009675cb7beec930e263014fb",
+ "00d4b384540a5bb54aa760edaa4fb2fe",
+ "00be9b1479ed680fdd5d91a41eb926d0",
+ "009182347502af97077c40a6e74b4b5c",
+ "00f5c90ee1c67fa77fd821f8e9fab4f1",
+ "005568f9a2dd6b0c0cc2f5ba3d9cac38",
+ "008b481f8678577d9cf6aa3f6cd6056b",
+ "00c4323ece5e4fe3b6cd4c5c932931af",
+ "009791f7550c3798c5a214cb2d0ea773",
+ "008a7baab22481f0ad8167dd9f90d55c",
+ "00f0e601519aafdc8ff94975e64c946d",
+ "0083b61e0daa9219df59d697c270cd31",
+ }
+
+ for _, m := range ms {
+ seed, err := hex.DecodeString(m)
+ require.NoError(t, err)
+
+ mnemonic, err := NewMnemonic(seed)
+ require.NoError(t, err)
+
+ isValid := isMnemonicChecksumValid(strings.Split(mnemonic, " "))
+ require.True(t, isValid)
+ }
+}
+
+func TestEntropyFromMnemonic128(t *testing.T) {
+ testEntropyFromMnemonic(t, 128)
+}
+
+func TestEntropyFromMnemonic160(t *testing.T) {
+ testEntropyFromMnemonic(t, 160)
+}
+
+func TestEntropyFromMnemonic192(t *testing.T) {
+ testEntropyFromMnemonic(t, 192)
+}
+
+func TestEntropyFromMnemonic224(t *testing.T) {
+ testEntropyFromMnemonic(t, 224)
+}
+
+func TestEntropyFromMnemonic256(t *testing.T) {
+ testEntropyFromMnemonic(t, 256)
+}
+
+func TestEntropyFromMnemonicInvalidChecksum(t *testing.T) {
+ _, err := EntropyFromMnemonic("abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon yellow")
+ require.Equal(t, ErrChecksumIncorrect, err)
+}
+
+func TestEntropyFromMnemonicInvalidMnemonicSize(t *testing.T) {
+ for _, mnemonic := range []string{
+ "a a a a a a a a a a a a a a a a a a a a a a a a a", // Too many words
+ "a", // Too few
+ "a a a a a a a a a a a a a a", // Not multiple of 3
+ } {
+ _, err := EntropyFromMnemonic(mnemonic)
+ require.Equal(t, ErrInvalidNumberOfWords, err)
+ }
+}
+
+func testEntropyFromMnemonic(t *testing.T, bitSize int) {
+ for i := 0; i < 512; i++ {
+ expectedEntropy, err := NewEntropy(bitSize)
+ require.NoError(t, err)
+ require.True(t, len(expectedEntropy) != 0)
+
+ mnemonic, err := NewMnemonic(expectedEntropy)
+ require.NoError(t, err)
+ require.True(t, len(mnemonic) != 0)
+
+ actualEntropy, err := EntropyFromMnemonic(mnemonic)
+ require.NoError(t, err)
+ require.Equal(t, expectedEntropy, actualEntropy)
+ }
+}
+
+func testVectors() []vector {
+ return []vector{
+ {
+ entropy: "00000000000000000000000000000000",
+ mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
+ seed: "c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04",
+ },
+ {
+ entropy: "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
+ mnemonic: "legal winner thank year wave sausage worth useful legal winner thank yellow",
+ seed: "2e8905819b8723fe2c1d161860e5ee1830318dbf49a83bd451cfb8440c28bd6fa457fe1296106559a3c80937a1c1069be3a3a5bd381ee6260e8d9739fce1f607",
+ },
+ {
+ entropy: "80808080808080808080808080808080",
+ mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice cage above",
+ seed: "d71de856f81a8acc65e6fc851a38d4d7ec216fd0796d0a6827a3ad6ed5511a30fa280f12eb2e47ed2ac03b5c462a0358d18d69fe4f985ec81778c1b370b652a8",
+ },
+ {
+ entropy: "ffffffffffffffffffffffffffffffff",
+ mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong",
+ seed: "ac27495480225222079d7be181583751e86f571027b0497b5b5d11218e0a8a13332572917f0f8e5a589620c6f15b11c61dee327651a14c34e18231052e48c069",
+ },
+ {
+ entropy: "000000000000000000000000000000000000000000000000",
+ mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
+ seed: "035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa",
+ },
+ {
+ entropy: "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
+ mnemonic: "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will",
+ seed: "f2b94508732bcbacbcc020faefecfc89feafa6649a5491b8c952cede496c214a0c7b3c392d168748f2d4a612bada0753b52a1c7ac53c1e93abd5c6320b9e95dd",
+ },
+ {
+ entropy: "808080808080808080808080808080808080808080808080",
+ mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always",
+ seed: "107d7c02a5aa6f38c58083ff74f04c607c2d2c0ecc55501dadd72d025b751bc27fe913ffb796f841c49b1d33b610cf0e91d3aa239027f5e99fe4ce9e5088cd65",
+ },
+ {
+ entropy: "ffffffffffffffffffffffffffffffffffffffffffffffff",
+ mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when",
+ seed: "0cd6e5d827bb62eb8fc1e262254223817fd068a74b5b449cc2f667c3f1f985a76379b43348d952e2265b4cd129090758b3e3c2c49103b5051aac2eaeb890a528",
+ },
+ {
+ entropy: "0000000000000000000000000000000000000000000000000000000000000000",
+ mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art",
+ seed: "bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8",
+ },
+ {
+ entropy: "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
+ mnemonic: "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title",
+ seed: "bc09fca1804f7e69da93c2f2028eb238c227f2e9dda30cd63699232578480a4021b146ad717fbb7e451ce9eb835f43620bf5c514db0f8add49f5d121449d3e87",
+ },
+ {
+ entropy: "8080808080808080808080808080808080808080808080808080808080808080",
+ mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless",
+ seed: "c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f",
+ },
+ {
+ entropy: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote",
+ seed: "dd48c104698c30cfe2b6142103248622fb7bb0ff692eebb00089b32d22484e1613912f0a5b694407be899ffd31ed3992c456cdf60f5d4564b8ba3f05a69890ad",
+ },
+ {
+ entropy: "77c2b00716cec7213839159e404db50d",
+ mnemonic: "jelly better achieve collect unaware mountain thought cargo oxygen act hood bridge",
+ seed: "b5b6d0127db1a9d2226af0c3346031d77af31e918dba64287a1b44b8ebf63cdd52676f672a290aae502472cf2d602c051f3e6f18055e84e4c43897fc4e51a6ff",
+ },
+ {
+ entropy: "b63a9c59a6e641f288ebc103017f1da9f8290b3da6bdef7b",
+ mnemonic: "renew stay biology evidence goat welcome casual join adapt armor shuffle fault little machine walk stumble urge swap",
+ seed: "9248d83e06f4cd98debf5b6f010542760df925ce46cf38a1bdb4e4de7d21f5c39366941c69e1bdbf2966e0f6e6dbece898a0e2f0a4c2b3e640953dfe8b7bbdc5",
+ },
+ {
+ entropy: "3e141609b97933b66a060dcddc71fad1d91677db872031e85f4c015c5e7e8982",
+ mnemonic: "dignity pass list indicate nasty swamp pool script soccer toe leaf photo multiply desk host tomato cradle drill spread actor shine dismiss champion exotic",
+ seed: "ff7f3184df8696d8bef94b6c03114dbee0ef89ff938712301d27ed8336ca89ef9635da20af07d4175f2bf5f3de130f39c9d9e8dd0472489c19b1a020a940da67",
+ },
+ {
+ entropy: "0460ef47585604c5660618db2e6a7e7f",
+ mnemonic: "afford alter spike radar gate glance object seek swamp infant panel yellow",
+ seed: "65f93a9f36b6c85cbe634ffc1f99f2b82cbb10b31edc7f087b4f6cb9e976e9faf76ff41f8f27c99afdf38f7a303ba1136ee48a4c1e7fcd3dba7aa876113a36e4",
+ },
+ {
+ entropy: "72f60ebac5dd8add8d2a25a797102c3ce21bc029c200076f",
+ mnemonic: "indicate race push merry suffer human cruise dwarf pole review arch keep canvas theme poem divorce alter left",
+ seed: "3bbf9daa0dfad8229786ace5ddb4e00fa98a044ae4c4975ffd5e094dba9e0bb289349dbe2091761f30f382d4e35c4a670ee8ab50758d2c55881be69e327117ba",
+ },
+ {
+ entropy: "2c85efc7f24ee4573d2b81a6ec66cee209b2dcbd09d8eddc51e0215b0b68e416",
+ mnemonic: "clutch control vehicle tonight unusual clog visa ice plunge glimpse recipe series open hour vintage deposit universe tip job dress radar refuse motion taste",
+ seed: "fe908f96f46668b2d5b37d82f558c77ed0d69dd0e7e043a5b0511c48c2f1064694a956f86360c93dd04052a8899497ce9e985ebe0c8c52b955e6ae86d4ff4449",
+ },
+ {
+ entropy: "eaebabb2383351fd31d703840b32e9e2",
+ mnemonic: "turtle front uncle idea crush write shrug there lottery flower risk shell",
+ seed: "bdfb76a0759f301b0b899a1e3985227e53b3f51e67e3f2a65363caedf3e32fde42a66c404f18d7b05818c95ef3ca1e5146646856c461c073169467511680876c",
+ },
+ {
+ entropy: "7ac45cfe7722ee6c7ba84fbc2d5bd61b45cb2fe5eb65aa78",
+ mnemonic: "kiss carry display unusual confirm curtain upgrade antique rotate hello void custom frequent obey nut hole price segment",
+ seed: "ed56ff6c833c07982eb7119a8f48fd363c4a9b1601cd2de736b01045c5eb8ab4f57b079403485d1c4924f0790dc10a971763337cb9f9c62226f64fff26397c79",
+ },
+ {
+ entropy: "4fa1a8bc3e6d80ee1316050e862c1812031493212b7ec3f3bb1b08f168cabeef",
+ mnemonic: "exile ask congress lamp submit jacket era scheme attend cousin alcohol catch course end lucky hurt sentence oven short ball bird grab wing top",
+ seed: "095ee6f817b4c2cb30a5a797360a81a40ab0f9a4e25ecd672a3f58a0b5ba0687c096a6b14d2c0deb3bdefce4f61d01ae07417d502429352e27695163f7447a8c",
+ },
+ {
+ entropy: "18ab19a9f54a9274f03e5209a2ac8a91",
+ mnemonic: "board flee heavy tunnel powder denial science ski answer betray cargo cat",
+ seed: "6eff1bb21562918509c73cb990260db07c0ce34ff0e3cc4a8cb3276129fbcb300bddfe005831350efd633909f476c45c88253276d9fd0df6ef48609e8bb7dca8",
+ },
+ {
+ entropy: "18a2e1d81b8ecfb2a333adcb0c17a5b9eb76cc5d05db91a4",
+ mnemonic: "board blade invite damage undo sun mimic interest slam gaze truly inherit resist great inject rocket museum chief",
+ seed: "f84521c777a13b61564234bf8f8b62b3afce27fc4062b51bb5e62bdfecb23864ee6ecf07c1d5a97c0834307c5c852d8ceb88e7c97923c0a3b496bedd4e5f88a9",
+ },
+ {
+ entropy: "15da872c95a13dd738fbf50e427583ad61f18fd99f628c417a61cf8343c90419",
+ mnemonic: "beyond stage sleep clip because twist token leaf atom beauty genius food business side grid unable middle armed observe pair crouch tonight away coconut",
+ seed: "b15509eaa2d09d3efd3e006ef42151b30367dc6e3aa5e44caba3fe4d3e352e65101fbdb86a96776b91946ff06f8eac594dc6ee1d3e82a42dfe1b40fef6bcc3fd",
+ },
+ }
+}
+
+func badMnemonicSentences() []vector {
+ return []vector{
+ {
+ mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon",
+ err: ErrInvalidNumberOfWords,
+ },
+ {
+ mnemonic: "legal winner thank year wave sausage worth useful legal winner thank yellow yellow",
+ err: ErrInvalidNumberOfWords,
+ },
+ {
+ mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice caged above",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo, wrong",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon",
+ err: ErrInvalidNumberOfWords,
+ },
+ {
+ mnemonic: "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will will will",
+ err: ErrInvalidNumberOfWords,
+ },
+ {
+ mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always.",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo why",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art art",
+ err: ErrInvalidNumberOfWords,
+ },
+ {
+ mnemonic: "legal winner thank year wave sausage worth useful legal winner thanks year wave worth useful legal winner thank year wave sausage worth title",
+ err: ErrInvalidNumberOfWords,
+ },
+ {
+ mnemonic: "letter advice cage absurd amount doctor acoustic avoid letters advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo voted",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "jello better achieve collect unaware mountain thought cargo oxygen act hood bridge",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "renew, stay, biology, evidence, goat, welcome, casual, join, adapt, armor, shuffle, fault, little, machine, walk, stumble, urge, swap",
+ err: ErrUnknownWord,
+ },
+ {
+ mnemonic: "dignity pass list indicate nasty",
+ err: ErrInvalidNumberOfWords,
+ },
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/bip39/example_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/bip39/example_test.go
new file mode 100644
index 0000000..bb4364c
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/bip39/example_test.go
@@ -0,0 +1,37 @@
+package bip39_test
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "github.com/skycoin/skycoin/src/cipher/bip39"
+)
+
+func ExampleNewMnemonic() {
+ // The entropy can be any byte slice, generated however desired,
+ // as long as its bit size is a multiple of 32 and is within
+ // the inclusive range of {128,256}.
+ entropy, err := hex.DecodeString("066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad")
+ if err != nil {
+ panic(err)
+ }
+
+ // generate a mnemonic
+ mnemomic, err := bip39.NewMnemonic(entropy)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(mnemomic)
+ // output:
+ // all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform
+}
+
+func ExampleNewSeed() {
+ seed, err := bip39.NewSeed("all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform", "TREZOR")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(hex.EncodeToString(seed))
+ // output:
+ // 26e975ec644423f4a4c4f4215ef09b4bd7ef924e85d1d17c4cf3f136c2863cf6df0a475045652c57eb5fb41513ca2a2d67722b77e954b4b3fc11f7590449191d
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/bitcoin_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/bitcoin_test.go
new file mode 100644
index 0000000..890f9fe
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/bitcoin_test.go
@@ -0,0 +1,376 @@
+package cipher
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/skycoin/skycoin/src/cipher/base58"
+)
+
+func TestBitcoinAddress(t *testing.T) {
+ cases := []struct {
+ seckey string
+ pubkey string
+ addr string
+ }{
+ {
+ seckey: "1111111111111111111111111111111111111111111111111111111111111111",
+ pubkey: "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa",
+ addr: "1Q1pE5vPGEEMqRcVRMbtBK842Y6Pzo6nK9",
+ },
+ {
+ seckey: "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+ pubkey: "02ed83704c95d829046f1ac27806211132102c34e9ac7ffa1b71110658e5b9d1bd",
+ addr: "1NKRhS7iYUGTaAfaR5z8BueAJesqaTyc4a",
+ },
+ {
+ seckey: "47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012",
+ pubkey: "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3",
+ addr: "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.addr, func(t *testing.T) {
+ seckey := MustSecKeyFromHex(tc.seckey)
+
+ pubkey := MustPubKeyFromSecKey(seckey)
+ require.Equal(t, tc.pubkey, pubkey.Hex())
+
+ bitcoinAddr := BitcoinAddressFromPubKey(pubkey)
+ require.Equal(t, tc.addr, bitcoinAddr.String())
+
+ secAddr := MustBitcoinAddressFromSecKey(seckey)
+ require.Equal(t, tc.addr, secAddr.String())
+
+ secAddr, err := BitcoinAddressFromSecKey(seckey)
+ require.NoError(t, err)
+ require.Equal(t, tc.addr, secAddr.String())
+
+ pubAddr := BitcoinAddressFromPubKey(pubkey)
+ require.Equal(t, tc.addr, pubAddr.String())
+ })
+ }
+}
+
+func TestMustDecodeBase58BitcoinAddress(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := BitcoinAddressFromPubKey(p)
+ require.NoError(t, a.Verify(p))
+
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress("") })
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress("cascs") })
+ b := a.Bytes()
+ h := string(base58.Encode(b[:len(b)/2]))
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress(h) })
+ h = string(base58.Encode(b))
+ require.NotPanics(t, func() { MustDecodeBase58BitcoinAddress(h) })
+ a2 := MustDecodeBase58BitcoinAddress(h)
+ require.Equal(t, a, a2)
+
+ require.NotPanics(t, func() { MustDecodeBase58BitcoinAddress(a.String()) })
+ a2 = MustDecodeBase58BitcoinAddress(a.String())
+ require.Equal(t, a, a2)
+
+ // preceding whitespace is invalid
+ badAddr := " " + a.String()
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress(badAddr) })
+
+ // preceding zeroes are invalid
+ badAddr = "000" + a.String()
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress(badAddr) })
+
+ // trailing whitespace is invalid
+ badAddr = a.String() + " "
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress(badAddr) })
+
+ // trailing zeroes are invalid
+ badAddr = a.String() + "000"
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress(badAddr) })
+
+ null := "1111111111111111111111111"
+ require.Panics(t, func() { MustDecodeBase58BitcoinAddress(null) })
+}
+
+func TestDecodeBase58BitcoinAddress(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := BitcoinAddressFromPubKey(p)
+ require.NoError(t, a.Verify(p))
+
+ _, err := DecodeBase58BitcoinAddress("")
+ require.Error(t, err)
+
+ _, err = DecodeBase58BitcoinAddress("cascs")
+ require.Error(t, err)
+
+ b := a.Bytes()
+ h := string(base58.Encode(b[:len(b)/2]))
+ _, err = DecodeBase58BitcoinAddress(h)
+ require.Error(t, err)
+
+ h = string(base58.Encode(b))
+ a2, err := DecodeBase58BitcoinAddress(h)
+ require.NoError(t, err)
+ require.Equal(t, a, a2)
+
+ as := a.String()
+ a2, err = DecodeBase58BitcoinAddress(as)
+ require.NoError(t, err)
+ require.Equal(t, a, a2)
+
+ // preceding whitespace is invalid
+ as2 := " " + as
+ _, err = DecodeBase58BitcoinAddress(as2)
+ require.Error(t, err)
+
+ // preceding zeroes are invalid
+ as2 = "000" + as
+ _, err = DecodeBase58BitcoinAddress(as2)
+ require.Error(t, err)
+
+ // trailing whitespace is invalid
+ as2 = as + " "
+ _, err = DecodeBase58BitcoinAddress(as2)
+ require.Error(t, err)
+
+ // trailing zeroes are invalid
+ as2 = as + "000"
+ _, err = DecodeBase58BitcoinAddress(as2)
+ require.Error(t, err)
+
+ // null address is invalid
+ null := "1111111111111111111111111"
+ _, err = DecodeBase58BitcoinAddress(null)
+ require.Error(t, err)
+ require.Equal(t, ErrAddressInvalidChecksum, err)
+}
+
+func TestBitcoinAddressFromBytes(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := BitcoinAddressFromPubKey(p)
+ a2, err := BitcoinAddressFromBytes(a.Bytes())
+ require.NoError(t, err)
+ require.Equal(t, a2, a)
+
+ // Invalid number of bytes
+ b := a.Bytes()
+ _, err = BitcoinAddressFromBytes(b[:len(b)-2])
+ require.EqualError(t, err, "Invalid address length")
+
+ // Invalid checksum
+ b[len(b)-1] += byte(1)
+ _, err = BitcoinAddressFromBytes(b)
+ require.EqualError(t, err, "Invalid checksum")
+
+ a.Version = 2
+ b = a.Bytes()
+ _, err = BitcoinAddressFromBytes(b)
+ require.EqualError(t, err, "Address version invalid")
+}
+
+func TestMustBitcoinAddressFromBytes(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := BitcoinAddressFromPubKey(p)
+ a2 := MustBitcoinAddressFromBytes(a.Bytes())
+ require.Equal(t, a2, a)
+
+ // Invalid number of bytes
+ b := a.Bytes()
+ require.Panics(t, func() {
+ MustBitcoinAddressFromBytes(b[:len(b)-2])
+ })
+
+ // Invalid checksum
+ b[len(b)-1] += byte(1)
+ require.Panics(t, func() {
+ MustBitcoinAddressFromBytes(b)
+ })
+
+ a.Version = 2
+ b = a.Bytes()
+ require.Panics(t, func() {
+ MustBitcoinAddressFromBytes(b)
+ })
+}
+
+func TestBitcoinAddressFromSecKey(t *testing.T) {
+ p, s := GenerateKeyPair()
+ a, err := BitcoinAddressFromSecKey(s)
+ require.NoError(t, err)
+ // Valid pubkey+address
+ require.NoError(t, a.Verify(p))
+
+ _, err = BitcoinAddressFromSecKey(SecKey{})
+ require.Equal(t, errors.New("Attempt to load null seckey, unsafe"), err)
+}
+
+func TestMustBitcoinAddressFromSecKey(t *testing.T) {
+ p, s := GenerateKeyPair()
+ a := MustBitcoinAddressFromSecKey(s)
+ // Valid pubkey+address
+ require.NoError(t, a.Verify(p))
+
+ require.Panics(t, func() {
+ MustBitcoinAddressFromSecKey(SecKey{})
+ })
+}
+
+func TestBitcoinAddressNull(t *testing.T) {
+ var a BitcoinAddress
+ require.True(t, a.Null())
+
+ p, _ := GenerateKeyPair()
+ a = BitcoinAddressFromPubKey(p)
+ require.False(t, a.Null())
+}
+
+func TestBitcoinAddressVerify(t *testing.T) {
+ p, _ := GenerateKeyPair()
+ a := BitcoinAddressFromPubKey(p)
+ // Valid pubkey+address
+ require.NoError(t, a.Verify(p))
+ // Invalid pubkey
+ require.Error(t, a.Verify(PubKey{}))
+ p2, _ := GenerateKeyPair()
+ require.Error(t, a.Verify(p2))
+ // Bad version
+ a.Version = 0x01
+ require.Error(t, a.Verify(p))
+}
+
+func TestBitcoinWIFRoundTrip(t *testing.T) {
+ _, seckey1 := GenerateKeyPair()
+ wif1 := BitcoinWalletImportFormatFromSeckey(seckey1)
+ seckey2, err := SecKeyFromBitcoinWalletImportFormat(wif1)
+ wif2 := BitcoinWalletImportFormatFromSeckey(seckey2)
+
+ require.NoError(t, err)
+ require.Equal(t, seckey1, seckey2)
+ require.Equal(t, seckey1.Hex(), seckey2.Hex())
+ require.Equal(t, wif1, wif2)
+}
+
+func TestBitcoinWIF(t *testing.T) {
+ cases := []struct {
+ wif string
+ pubkey string
+ addr string
+ }{
+ {
+ wif: "KwntMbt59tTsj8xqpqYqRRWufyjGunvhSyeMo3NTYpFYzZbXJ5Hp",
+ pubkey: "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa",
+ addr: "1Q1pE5vPGEEMqRcVRMbtBK842Y6Pzo6nK9",
+ },
+ {
+ wif: "L4ezQvyC6QoBhxB4GVs9fAPhUKtbaXYUn8YTqoeXwbevQq4U92vN",
+ pubkey: "02ed83704c95d829046f1ac27806211132102c34e9ac7ffa1b71110658e5b9d1bd",
+ addr: "1NKRhS7iYUGTaAfaR5z8BueAJesqaTyc4a",
+ },
+ {
+ wif: "KydbzBtk6uc7M6dXwEgTEH2sphZxSPbmDSz6kUUHi4eUpSQuhEbq",
+ pubkey: "032596957532fc37e40486b910802ff45eeaa924548c0e1c080ef804e523ec3ed3",
+ addr: "19ck9VKC6KjGxR9LJg4DNMRc45qFrJguvV",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.addr, func(t *testing.T) {
+ seckey, err := SecKeyFromBitcoinWalletImportFormat(tc.wif)
+ require.NoError(t, err)
+
+ require.NotPanics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(tc.wif)
+ })
+
+ pubkey := MustPubKeyFromSecKey(seckey)
+ require.Equal(t, tc.pubkey, pubkey.Hex())
+
+ bitcoinAddr := BitcoinAddressFromPubKey(pubkey)
+ require.Equal(t, tc.addr, bitcoinAddr.String())
+ })
+ }
+}
+
+func TestBitcoinWIFFailures(t *testing.T) {
+ a := " asdio"
+ _, err := SecKeyFromBitcoinWalletImportFormat(a)
+ require.Equal(t, errors.New("Invalid base58 character"), err)
+
+ a = string(base58.Encode(randBytes(t, 37)))
+ _, err = SecKeyFromBitcoinWalletImportFormat(a)
+ require.Equal(t, errors.New("Invalid length"), err)
+
+ a = string(base58.Encode(randBytes(t, 39)))
+ _, err = SecKeyFromBitcoinWalletImportFormat(a)
+ require.Equal(t, errors.New("Invalid length"), err)
+
+ b := randBytes(t, 38)
+ b[0] = 0x70
+ a = string(base58.Encode(b))
+ _, err = SecKeyFromBitcoinWalletImportFormat(a)
+ require.Equal(t, errors.New("Bitcoin WIF: First byte invalid"), err)
+
+ b = randBytes(t, 38)
+ b[0] = 0x80
+ b[33] = 0x02
+ a = string(base58.Encode(b))
+ _, err = SecKeyFromBitcoinWalletImportFormat(a)
+ require.Equal(t, errors.New("Bitcoin WIF: Invalid 33rd byte"), err)
+
+ b = randBytes(t, 38)
+ b[0] = 0x80
+ b[33] = 0x01
+ hashed := DoubleSHA256(b[0:34])
+ chksum := hashed[0:4]
+ chksum[0] = chksum[0] + 1
+ copy(b[34:38], chksum[:])
+ a = string(base58.Encode(b))
+ _, err = SecKeyFromBitcoinWalletImportFormat(a)
+ require.Equal(t, errors.New("Bitcoin WIF: Checksum fail"), err)
+}
+
+func TestMustBitcoinWIFFailures(t *testing.T) {
+ a := " asdio"
+ require.Panics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(a)
+ })
+
+ a = string(base58.Encode(randBytes(t, 37)))
+ require.Panics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(a)
+ })
+
+ a = string(base58.Encode(randBytes(t, 39)))
+ require.Panics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(a)
+ })
+
+ b := randBytes(t, 38)
+ b[0] = 0x70
+ a = string(base58.Encode(b))
+ require.Panics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(a)
+ })
+
+ b = randBytes(t, 38)
+ b[0] = 0x80
+ b[33] = 0x02
+ a = string(base58.Encode(b))
+ require.Panics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(a)
+ })
+
+ b = randBytes(t, 38)
+ b[0] = 0x80
+ b[33] = 0x01
+ hashed := DoubleSHA256(b[0:34])
+ chksum := hashed[0:4]
+ chksum[0] = chksum[0] + 1
+ copy(b[34:38], chksum[:])
+ a = string(base58.Encode(b))
+ require.Panics(t, func() {
+ MustSecKeyFromBitcoinWalletImportFormat(a)
+ })
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go
index 91ff956..04b58d7 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/crypto_test.go
@@ -4,47 +4,116 @@ import (
"bytes"
"crypto/sha256"
"encoding/hex"
+ "errors"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/skycoin/skycoin/src/cipher/ripemd160"
)
func TestNewPubKey(t *testing.T) {
- assert.Panics(t, func() { NewPubKey(randBytes(t, 31)) })
- assert.Panics(t, func() { NewPubKey(randBytes(t, 32)) })
- assert.Panics(t, func() { NewPubKey(randBytes(t, 34)) })
- assert.Panics(t, func() { NewPubKey(randBytes(t, 0)) })
- assert.Panics(t, func() { NewPubKey(randBytes(t, 100)) })
- assert.NotPanics(t, func() { NewPubKey(randBytes(t, 33)) })
- b := randBytes(t, 33)
- p := NewPubKey(b)
- assert.True(t, bytes.Equal(p[:], b))
+ _, err := NewPubKey(randBytes(t, 31))
+ require.Equal(t, errors.New("Invalid public key length"), err)
+ _, err = NewPubKey(randBytes(t, 32))
+ require.Equal(t, errors.New("Invalid public key length"), err)
+ _, err = NewPubKey(randBytes(t, 34))
+ require.Equal(t, errors.New("Invalid public key length"), err)
+ _, err = NewPubKey(randBytes(t, 0))
+ require.Equal(t, errors.New("Invalid public key length"), err)
+ _, err = NewPubKey(randBytes(t, 100))
+ require.Equal(t, errors.New("Invalid public key length"), err)
+
+ _, err = NewPubKey(make([]byte, len(PubKey{})))
+ require.Equal(t, errors.New("Invalid public key"), err)
+
+ p, _ := GenerateKeyPair()
+ p2, err := NewPubKey(p[:])
+ require.NoError(t, err)
+ require.Equal(t, p, p2)
+}
+
+func TestMustNewPubKey(t *testing.T) {
+ require.Panics(t, func() { MustNewPubKey(randBytes(t, 31)) })
+ require.Panics(t, func() { MustNewPubKey(randBytes(t, 32)) })
+ require.Panics(t, func() { MustNewPubKey(randBytes(t, 34)) })
+ require.Panics(t, func() { MustNewPubKey(randBytes(t, 0)) })
+ require.Panics(t, func() { MustNewPubKey(randBytes(t, 100)) })
+ require.Panics(t, func() { MustNewPubKey(make([]byte, len(PubKey{}))) })
+
+ p, _ := GenerateKeyPair()
+ p2 := MustNewPubKey(p[:])
+ require.Equal(t, p, p2)
}
func TestPubKeyFromHex(t *testing.T) {
// Invalid hex
- assert.Panics(t, func() { MustPubKeyFromHex("") })
- assert.Panics(t, func() { MustPubKeyFromHex("cascs") })
+ _, err := PubKeyFromHex("")
+ require.Equal(t, errors.New("Invalid public key length"), err)
+
+ _, err = PubKeyFromHex("cascs")
+ require.Equal(t, errors.New("Invalid public key"), err)
+
+ // Empty key
+ empty := PubKey{}
+ h := hex.EncodeToString(empty[:])
+ _, err = PubKeyFromHex(h)
+ require.Equal(t, errors.New("Invalid public key"), err)
+
+ // Invalid hex length
+ p, _ := GenerateKeyPair()
+ s := hex.EncodeToString(p[:len(p)/2])
+ _, err = PubKeyFromHex(s)
+ require.Equal(t, errors.New("Invalid public key length"), err)
+
+ // Valid
+ s = hex.EncodeToString(p[:])
+ p2, err := PubKeyFromHex(s)
+ require.NoError(t, err)
+ require.Equal(t, p, p2)
+}
+
+func TestMustPubKeyFromHex(t *testing.T) {
+ // Invalid hex
+ require.Panics(t, func() { MustPubKeyFromHex("") })
+ require.Panics(t, func() { MustPubKeyFromHex("cascs") })
+
+ // Empty key
+ empty := PubKey{}
+ h := hex.EncodeToString(empty[:])
+ require.Panics(t, func() { MustPubKeyFromHex(h) })
+
// Invalid hex length
- p := NewPubKey(randBytes(t, 33))
+ p, _ := GenerateKeyPair()
s := hex.EncodeToString(p[:len(p)/2])
- assert.Panics(t, func() { MustPubKeyFromHex(s) })
+ require.Panics(t, func() { MustPubKeyFromHex(s) })
+
// Valid
s = hex.EncodeToString(p[:])
- assert.NotPanics(t, func() { MustPubKeyFromHex(s) })
- assert.Equal(t, p, MustPubKeyFromHex(s))
+ require.NotPanics(t, func() { MustPubKeyFromHex(s) })
+ require.Equal(t, p, MustPubKeyFromHex(s))
}
func TestPubKeyHex(t *testing.T) {
- b := randBytes(t, 33)
- p := NewPubKey(b)
+ p, _ := GenerateKeyPair()
h := p.Hex()
- p2 := MustPubKeyFromHex(h)
- assert.Equal(t, p2, p)
- assert.Equal(t, p2.Hex(), h)
+ p2, err := PubKeyFromHex(h)
+ require.NoError(t, err)
+ require.Equal(t, p2, p)
+ require.Equal(t, p2.Hex(), h)
+}
+
+func TestNewPubKeyRandom(t *testing.T) {
+ // Random bytes should not be valid, most of the time
+ failed := false
+ for i := 0; i < 10; i++ {
+ b := randBytes(t, 33)
+ if _, err := NewPubKey(b); err != nil {
+ failed = true
+ break
+ }
+ }
+ require.True(t, failed)
}
func TestPubKeyVerify(t *testing.T) {
@@ -52,43 +121,46 @@ func TestPubKeyVerify(t *testing.T) {
failed := false
for i := 0; i < 10; i++ {
b := randBytes(t, 33)
- if NewPubKey(b).Verify() != nil {
+ p := PubKey{}
+ copy(p[:], b[:])
+ if p.Verify() != nil {
failed = true
break
}
}
- assert.True(t, failed)
+ require.True(t, failed)
}
-func TestPubKeyVerifyNil(t *testing.T) {
+func TestPubKeyNullVerifyFails(t *testing.T) {
// Empty public key should not be valid
p := PubKey{}
- assert.NotNil(t, p.Verify())
+ require.Error(t, p.Verify())
}
func TestPubKeyVerifyDefault1(t *testing.T) {
// Generated pub key should be valid
p, _ := GenerateKeyPair()
- assert.Nil(t, p.Verify())
+ require.NoError(t, p.Verify())
}
func TestPubKeyVerifyDefault2(t *testing.T) {
for i := 0; i < 1024; i++ {
p, _ := GenerateKeyPair()
- assert.Nil(t, p.Verify())
+ require.NoError(t, p.Verify())
}
}
-func TestPubKeyToAddressHash(t *testing.T) {
+func TestPubKeyRipemd160(t *testing.T) {
p, _ := GenerateKeyPair()
- h := p.ToAddressHash()
+ h := PubKeyRipemd160(p)
// Should be Ripemd160(SHA256(SHA256()))
x := sha256.Sum256(p[:])
x = sha256.Sum256(x[:])
rh := ripemd160.New()
- rh.Write(x[:])
+ _, err := rh.Write(x[:])
+ require.NoError(t, err)
y := rh.Sum(nil)
- assert.True(t, bytes.Equal(h[:], y))
+ require.True(t, bytes.Equal(h[:], y))
}
func TestPubKeyToAddress(t *testing.T) {
@@ -96,11 +168,11 @@ func TestPubKeyToAddress(t *testing.T) {
addr := AddressFromPubKey(p)
//func (self Address) Verify(key PubKey) error {
err := addr.Verify(p)
- assert.Nil(t, err)
+ require.NoError(t, err)
addrStr := addr.String()
_, err = DecodeBase58Address(addrStr)
//func DecodeBase58Address(addr string) (Address, error) {
- assert.Nil(t, err)
+ require.NoError(t, err)
}
func TestPubKeyToAddress2(t *testing.T) {
@@ -109,236 +181,573 @@ func TestPubKeyToAddress2(t *testing.T) {
addr := AddressFromPubKey(p)
//func (self Address) Verify(key PubKey) error {
err := addr.Verify(p)
- assert.Nil(t, err)
+ require.NoError(t, err)
addrStr := addr.String()
_, err = DecodeBase58Address(addrStr)
//func DecodeBase58Address(addr string) (Address, error) {
- assert.Nil(t, err)
+ require.NoError(t, err)
}
}
+func TestNewSecKey(t *testing.T) {
+ _, err := NewSecKey(randBytes(t, 31))
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+ _, err = NewSecKey(randBytes(t, 33))
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+ _, err = NewSecKey(randBytes(t, 34))
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+ _, err = NewSecKey(randBytes(t, 0))
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+ _, err = NewSecKey(randBytes(t, 100))
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+
+ b := randBytes(t, 32)
+ p, err := NewSecKey(b)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(p[:], b))
+}
+
func TestMustNewSecKey(t *testing.T) {
- assert.Panics(t, func() { NewSecKey(randBytes(t, 31)) })
- assert.Panics(t, func() { NewSecKey(randBytes(t, 33)) })
- assert.Panics(t, func() { NewSecKey(randBytes(t, 34)) })
- assert.Panics(t, func() { NewSecKey(randBytes(t, 0)) })
- assert.Panics(t, func() { NewSecKey(randBytes(t, 100)) })
- assert.NotPanics(t, func() { NewSecKey(randBytes(t, 32)) })
+ require.Panics(t, func() { MustNewSecKey(randBytes(t, 31)) })
+ require.Panics(t, func() { MustNewSecKey(randBytes(t, 33)) })
+ require.Panics(t, func() { MustNewSecKey(randBytes(t, 34)) })
+ require.Panics(t, func() { MustNewSecKey(randBytes(t, 0)) })
+ require.Panics(t, func() { MustNewSecKey(randBytes(t, 100)) })
+ require.NotPanics(t, func() { MustNewSecKey(randBytes(t, 32)) })
b := randBytes(t, 32)
- p := NewSecKey(b)
- assert.True(t, bytes.Equal(p[:], b))
+ p := MustNewSecKey(b)
+ require.True(t, bytes.Equal(p[:], b))
+}
+
+func TestSecKeyFromHex(t *testing.T) {
+ // Invalid hex
+ _, err := SecKeyFromHex("")
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+
+ _, err = SecKeyFromHex("cascs")
+ require.Equal(t, errors.New("Invalid secret key"), err)
+
+ // Invalid hex length
+ p := MustNewSecKey(randBytes(t, 32))
+ s := hex.EncodeToString(p[:len(p)/2])
+ _, err = SecKeyFromHex(s)
+ require.Equal(t, errors.New("Invalid secret key length"), err)
+
+ // Valid
+ s = hex.EncodeToString(p[:])
+ p2, err := SecKeyFromHex(s)
+ require.NoError(t, err)
+ require.Equal(t, p2, p)
}
func TestMustSecKeyFromHex(t *testing.T) {
// Invalid hex
- assert.Panics(t, func() { MustSecKeyFromHex("") })
- assert.Panics(t, func() { MustSecKeyFromHex("cascs") })
+ require.Panics(t, func() { MustSecKeyFromHex("") })
+ require.Panics(t, func() { MustSecKeyFromHex("cascs") })
// Invalid hex length
- p := NewSecKey(randBytes(t, 32))
+ p := MustNewSecKey(randBytes(t, 32))
s := hex.EncodeToString(p[:len(p)/2])
- assert.Panics(t, func() { MustSecKeyFromHex(s) })
+ require.Panics(t, func() { MustSecKeyFromHex(s) })
// Valid
s = hex.EncodeToString(p[:])
- assert.NotPanics(t, func() { MustSecKeyFromHex(s) })
- assert.Equal(t, p, MustSecKeyFromHex(s))
+ require.NotPanics(t, func() { MustSecKeyFromHex(s) })
+ require.Equal(t, p, MustSecKeyFromHex(s))
}
func TestSecKeyHex(t *testing.T) {
b := randBytes(t, 32)
- p := NewSecKey(b)
+ p := MustNewSecKey(b)
h := p.Hex()
p2 := MustSecKeyFromHex(h)
- assert.Equal(t, p2, p)
- assert.Equal(t, p2.Hex(), h)
+ require.Equal(t, p2, p)
+ require.Equal(t, p2.Hex(), h)
}
func TestSecKeyVerify(t *testing.T) {
// Empty secret key should not be valid
p := SecKey{}
- assert.NotNil(t, p.Verify())
+ require.Error(t, p.Verify())
// Generated sec key should be valid
_, p = GenerateKeyPair()
- assert.Nil(t, p.Verify())
+ require.NoError(t, p.Verify())
// Random bytes are usually valid
}
-func TestECDHonce(t *testing.T) {
+func TestECDH(t *testing.T) {
pub1, sec1 := GenerateKeyPair()
pub2, sec2 := GenerateKeyPair()
- buf1 := ECDH(pub2, sec1)
- buf2 := ECDH(pub1, sec2)
+ buf1, err := ECDH(pub2, sec1)
+ require.NoError(t, err)
+ buf2, err := ECDH(pub1, sec2)
+ require.NoError(t, err)
- assert.True(t, bytes.Equal(buf1, buf2))
-}
+ require.True(t, bytes.Equal(buf1, buf2))
+
+ goodPub, goodSec := GenerateKeyPair()
+ var badPub PubKey
+ var badSec SecKey
+
+ _, err = ECDH(badPub, goodSec)
+ require.Equal(t, errors.New("ECDH invalid pubkey input"), err)
+ _, err = ECDH(goodPub, badSec)
+ require.Equal(t, errors.New("ECDH invalid seckey input"), err)
-func TestECDHloop(t *testing.T) {
for i := 0; i < 128; i++ {
pub1, sec1 := GenerateKeyPair()
pub2, sec2 := GenerateKeyPair()
- buf1 := ECDH(pub2, sec1)
- buf2 := ECDH(pub1, sec2)
- assert.True(t, bytes.Equal(buf1, buf2))
+ buf1, err := ECDH(pub2, sec1)
+ require.NoError(t, err)
+ buf2, err := ECDH(pub1, sec2)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(buf1, buf2))
}
}
+func TestMustECDH(t *testing.T) {
+ goodPub, goodSec := GenerateKeyPair()
+ var badPub PubKey
+ var badSec SecKey
+
+ require.Panics(t, func() {
+ MustECDH(badPub, goodSec)
+ })
+ require.Panics(t, func() {
+ MustECDH(goodPub, badSec)
+ })
+
+ pub1, sec1 := GenerateKeyPair()
+ pub2, sec2 := GenerateKeyPair()
+
+ buf1 := MustECDH(pub2, sec1)
+ buf2 := MustECDH(pub1, sec2)
+
+ require.True(t, bytes.Equal(buf1, buf2))
+}
+
func TestNewSig(t *testing.T) {
- assert.Panics(t, func() { NewSig(randBytes(t, 64)) })
- assert.Panics(t, func() { NewSig(randBytes(t, 66)) })
- assert.Panics(t, func() { NewSig(randBytes(t, 67)) })
- assert.Panics(t, func() { NewSig(randBytes(t, 0)) })
- assert.Panics(t, func() { NewSig(randBytes(t, 100)) })
- assert.NotPanics(t, func() { NewSig(randBytes(t, 65)) })
+ _, err := NewSig(randBytes(t, 64))
+ require.Equal(t, errors.New("Invalid signature length"), err)
+ _, err = NewSig(randBytes(t, 66))
+ require.Equal(t, errors.New("Invalid signature length"), err)
+ _, err = NewSig(randBytes(t, 67))
+ require.Equal(t, errors.New("Invalid signature length"), err)
+ _, err = NewSig(randBytes(t, 0))
+ require.Equal(t, errors.New("Invalid signature length"), err)
+ _, err = NewSig(randBytes(t, 100))
+ require.Equal(t, errors.New("Invalid signature length"), err)
+
b := randBytes(t, 65)
- p := NewSig(b)
- assert.True(t, bytes.Equal(p[:], b))
+ p, err := NewSig(b)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(p[:], b))
+}
+
+func TestMustNewSig(t *testing.T) {
+ require.Panics(t, func() { MustNewSig(randBytes(t, 64)) })
+ require.Panics(t, func() { MustNewSig(randBytes(t, 66)) })
+ require.Panics(t, func() { MustNewSig(randBytes(t, 67)) })
+ require.Panics(t, func() { MustNewSig(randBytes(t, 0)) })
+ require.Panics(t, func() { MustNewSig(randBytes(t, 100)) })
+
+ require.NotPanics(t, func() { MustNewSig(randBytes(t, 65)) })
+
+ b := randBytes(t, 65)
+ p := MustNewSig(b)
+ require.True(t, bytes.Equal(p[:], b))
+}
+
+func TestSigFromHex(t *testing.T) {
+ // Invalid hex
+ _, err := SigFromHex("")
+ require.Equal(t, errors.New("Invalid signature length"), err)
+
+ _, err = SigFromHex("cascs")
+ require.Equal(t, errors.New("Invalid signature"), err)
+
+ // Invalid hex length
+ p := MustNewSig(randBytes(t, 65))
+ s := hex.EncodeToString(p[:len(p)/2])
+ _, err = SigFromHex(s)
+ require.Equal(t, errors.New("Invalid signature length"), err)
+
+ // Valid
+ s = hex.EncodeToString(p[:])
+ s2, err := SigFromHex(s)
+ require.NoError(t, err)
+ require.Equal(t, p, s2)
}
func TestMustSigFromHex(t *testing.T) {
// Invalid hex
- assert.Panics(t, func() { MustSigFromHex("") })
- assert.Panics(t, func() { MustSigFromHex("cascs") })
+ require.Panics(t, func() { MustSigFromHex("") })
+ require.Panics(t, func() { MustSigFromHex("cascs") })
// Invalid hex length
- p := NewSig(randBytes(t, 65))
+ p := MustNewSig(randBytes(t, 65))
s := hex.EncodeToString(p[:len(p)/2])
- assert.Panics(t, func() { MustSigFromHex(s) })
+ require.Panics(t, func() { MustSigFromHex(s) })
// Valid
s = hex.EncodeToString(p[:])
- assert.NotPanics(t, func() { MustSigFromHex(s) })
- assert.Equal(t, p, MustSigFromHex(s))
+ require.NotPanics(t, func() { MustSigFromHex(s) })
+ require.Equal(t, p, MustSigFromHex(s))
}
func TestSigHex(t *testing.T) {
b := randBytes(t, 65)
- p := NewSig(b)
+ p := MustNewSig(b)
h := p.Hex()
p2 := MustSigFromHex(h)
- assert.Equal(t, p2, p)
- assert.Equal(t, p2.Hex(), h)
+ require.Equal(t, p2, p)
+ require.Equal(t, p2.Hex(), h)
}
-func TestChkSig(t *testing.T) {
+func TestVerifyAddressSignedHash(t *testing.T) {
p, s := GenerateKeyPair()
- assert.Nil(t, p.Verify())
- assert.Nil(t, s.Verify())
+ require.NoError(t, p.Verify())
+ require.NoError(t, s.Verify())
a := AddressFromPubKey(p)
- assert.Nil(t, a.Verify(p))
+ require.NoError(t, a.Verify(p))
b := randBytes(t, 256)
h := SumSHA256(b)
- sig := SignHash(h, s)
- assert.Nil(t, ChkSig(a, h, sig))
+ sig := MustSignHash(h, s)
+ require.NoError(t, VerifyAddressSignedHash(a, sig, h))
// Empty sig should be invalid
- assert.NotNil(t, ChkSig(a, h, Sig{}))
+ require.Error(t, VerifyAddressSignedHash(a, Sig{}, h))
// Random sigs should not pass
for i := 0; i < 100; i++ {
- assert.NotNil(t, ChkSig(a, h, NewSig(randBytes(t, 65))))
+ require.Error(t, VerifyAddressSignedHash(a, MustNewSig(randBytes(t, 65)), h))
}
// Sig for one hash does not work for another hash
h2 := SumSHA256(randBytes(t, 256))
- sig2 := SignHash(h2, s)
- assert.Nil(t, ChkSig(a, h2, sig2))
- assert.NotNil(t, ChkSig(a, h, sig2))
- assert.NotNil(t, ChkSig(a, h2, sig))
+ sig2 := MustSignHash(h2, s)
+ require.NoError(t, VerifyAddressSignedHash(a, sig2, h2))
+ require.Error(t, VerifyAddressSignedHash(a, sig2, h))
+ require.Error(t, VerifyAddressSignedHash(a, sig, h2))
// Different secret keys should not create same sig
p2, s2 := GenerateKeyPair()
a2 := AddressFromPubKey(p2)
- h = SHA256{}
- sig = SignHash(h, s)
- sig2 = SignHash(h, s2)
- assert.Nil(t, ChkSig(a, h, sig))
- assert.Nil(t, ChkSig(a2, h, sig2))
- assert.NotEqual(t, sig, sig2)
h = SumSHA256(randBytes(t, 256))
- sig = SignHash(h, s)
- sig2 = SignHash(h, s2)
- assert.Nil(t, ChkSig(a, h, sig))
- assert.Nil(t, ChkSig(a2, h, sig2))
- assert.NotEqual(t, sig, sig2)
+ sig = MustSignHash(h, s)
+ sig2 = MustSignHash(h, s2)
+ require.NoError(t, VerifyAddressSignedHash(a, sig, h))
+ require.NoError(t, VerifyAddressSignedHash(a2, sig2, h))
+ require.NotEqual(t, sig, sig2)
+ h = SumSHA256(randBytes(t, 256))
+ sig = MustSignHash(h, s)
+ sig2 = MustSignHash(h, s2)
+ require.NoError(t, VerifyAddressSignedHash(a, sig, h))
+ require.NoError(t, VerifyAddressSignedHash(a2, sig2, h))
+ require.NotEqual(t, sig, sig2)
// Bad address should be invalid
- assert.NotNil(t, ChkSig(a, h, sig2))
- assert.NotNil(t, ChkSig(a2, h, sig))
+ require.Error(t, VerifyAddressSignedHash(a, sig2, h))
+ require.Error(t, VerifyAddressSignedHash(a2, sig, h))
+
+ // Empty hash should panic
+ require.Panics(t, func() {
+ MustSignHash(SHA256{}, s)
+ })
}
func TestSignHash(t *testing.T) {
p, s := GenerateKeyPair()
a := AddressFromPubKey(p)
h := SumSHA256(randBytes(t, 256))
- sig := SignHash(h, s)
- assert.NotEqual(t, sig, Sig{})
- assert.Nil(t, ChkSig(a, h, sig))
+ sig, err := SignHash(h, s)
+ require.NoError(t, err)
+ require.NotEqual(t, sig, Sig{})
+ require.NoError(t, VerifyAddressSignedHash(a, sig, h))
+ require.NoError(t, VerifyPubKeySignedHash(p, sig, h))
+
+ p2, err := PubKeyFromSig(sig, h)
+ require.NoError(t, err)
+ require.Equal(t, p, p2)
+
+ _, err = SignHash(h, SecKey{})
+ require.Equal(t, ErrInvalidSecKey, err)
+
+ _, err = SignHash(SHA256{}, s)
+ require.Equal(t, ErrNullSignHash, err)
+}
+
+func TestMustSignHash(t *testing.T) {
+ p, s := GenerateKeyPair()
+ a := AddressFromPubKey(p)
+ h := SumSHA256(randBytes(t, 256))
+ sig := MustSignHash(h, s)
+ require.NotEqual(t, sig, Sig{})
+ require.NoError(t, VerifyAddressSignedHash(a, sig, h))
+
+ require.Panics(t, func() {
+ MustSignHash(h, SecKey{})
+ })
}
func TestPubKeyFromSecKey(t *testing.T) {
p, s := GenerateKeyPair()
- assert.Equal(t, PubKeyFromSecKey(s), p)
- assert.Panics(t, func() { PubKeyFromSecKey(SecKey{}) })
- assert.Panics(t, func() { PubKeyFromSecKey(NewSecKey(randBytes(t, 99))) })
- assert.Panics(t, func() { PubKeyFromSecKey(NewSecKey(randBytes(t, 31))) })
+ p2, err := PubKeyFromSecKey(s)
+ require.NoError(t, err)
+ require.Equal(t, p2, p)
+
+ _, err = PubKeyFromSecKey(SecKey{})
+ require.Equal(t, errors.New("Attempt to load null seckey, unsafe"), err)
+}
+
+func TestMustPubKeyFromSecKey(t *testing.T) {
+ p, s := GenerateKeyPair()
+ require.Equal(t, MustPubKeyFromSecKey(s), p)
+ require.Panics(t, func() { MustPubKeyFromSecKey(SecKey{}) })
}
func TestPubKeyFromSig(t *testing.T) {
p, s := GenerateKeyPair()
h := SumSHA256(randBytes(t, 256))
- sig := SignHash(h, s)
+ sig := MustSignHash(h, s)
p2, err := PubKeyFromSig(sig, h)
- assert.Equal(t, p, p2)
- assert.Nil(t, err)
+ require.Equal(t, p, p2)
+ require.NoError(t, err)
_, err = PubKeyFromSig(Sig{}, h)
- assert.NotNil(t, err)
+ require.Error(t, err)
}
-func TestVerifySignature(t *testing.T) {
+func TestMustPubKeyFromSig(t *testing.T) {
+ p, s := GenerateKeyPair()
+ h := SumSHA256(randBytes(t, 256))
+ sig := MustSignHash(h, s)
+ p2 := MustPubKeyFromSig(sig, h)
+ require.Equal(t, p, p2)
+
+ require.Panics(t, func() {
+ _ = MustPubKeyFromSig(Sig{}, h)
+ })
+}
+
+func TestVerifyPubKeySignedHash(t *testing.T) {
p, s := GenerateKeyPair()
h := SumSHA256(randBytes(t, 256))
h2 := SumSHA256(randBytes(t, 256))
- sig := SignHash(h, s)
- assert.Nil(t, VerifySignature(p, sig, h))
- assert.NotNil(t, VerifySignature(p, Sig{}, h))
- assert.NotNil(t, VerifySignature(p, sig, h2))
+ sig := MustSignHash(h, s)
+ require.NoError(t, VerifyPubKeySignedHash(p, sig, h))
+ require.Error(t, VerifyPubKeySignedHash(p, Sig{}, h))
+ require.Error(t, VerifyPubKeySignedHash(p, sig, h2))
p2, _ := GenerateKeyPair()
- assert.NotNil(t, VerifySignature(p2, sig, h))
- assert.NotNil(t, VerifySignature(PubKey{}, sig, h))
+ require.Error(t, VerifyPubKeySignedHash(p2, sig, h))
+ require.Error(t, VerifyPubKeySignedHash(PubKey{}, sig, h))
}
func TestGenerateKeyPair(t *testing.T) {
- p, s := GenerateKeyPair()
- assert.Nil(t, p.Verify())
- assert.Nil(t, s.Verify())
+ for i := 0; i < 10; i++ {
+ p, s := GenerateKeyPair()
+ require.NoError(t, p.Verify())
+ require.NoError(t, s.Verify())
+ err := CheckSecKey(s)
+ require.NoError(t, err)
+ }
}
func TestGenerateDeterministicKeyPair(t *testing.T) {
// TODO -- deterministic key pairs are useless as is because we can't
// generate pair n+1, only pair 0
seed := randBytes(t, 32)
- p, s := GenerateDeterministicKeyPair(seed)
- assert.Nil(t, p.Verify())
- assert.Nil(t, s.Verify())
- p, s = GenerateDeterministicKeyPair(seed)
- assert.Nil(t, p.Verify())
- assert.Nil(t, s.Verify())
+ p, s := MustGenerateDeterministicKeyPair(seed)
+ require.NoError(t, p.Verify())
+ require.NoError(t, s.Verify())
+ p, s = MustGenerateDeterministicKeyPair(seed)
+ require.NoError(t, p.Verify())
+ require.NoError(t, s.Verify())
+
+ _, _, err := GenerateDeterministicKeyPair(nil)
+ require.Equal(t, errors.New("Seed input is empty"), err)
+
+ require.Panics(t, func() {
+ MustGenerateDeterministicKeyPair(nil)
+ })
}
-func TestSecKeTest(t *testing.T) {
+func TestGenerateDeterministicKeyPairs(t *testing.T) {
+ seed := randBytes(t, 32)
+ keys, err := GenerateDeterministicKeyPairs(seed, 4)
+ require.NoError(t, err)
+ require.Len(t, keys, 4)
+ for _, s := range keys {
+ require.NoError(t, s.Verify())
+ }
+
+ keys2 := MustGenerateDeterministicKeyPairs(seed, 4)
+ require.Equal(t, keys, keys2)
+
+ _, err = GenerateDeterministicKeyPairs(nil, 1)
+ require.Equal(t, errors.New("Seed input is empty"), err)
+
+ require.Panics(t, func() {
+ MustGenerateDeterministicKeyPairs(nil, 1)
+ })
+}
+
+func TestGenerateDeterministicKeyPairsSeed(t *testing.T) {
+ seed := randBytes(t, 32)
+ newSeed, keys, err := GenerateDeterministicKeyPairsSeed(seed, 4)
+ require.NoError(t, err)
+ require.Len(t, newSeed, 32)
+ require.NotEqual(t, seed, newSeed)
+ require.Len(t, keys, 4)
+ for _, s := range keys {
+ require.NoError(t, s.Verify())
+ }
+
+ newSeed2, keys2 := MustGenerateDeterministicKeyPairsSeed(seed, 4)
+ require.Equal(t, newSeed, newSeed2)
+ require.Equal(t, keys, keys2)
+
+ _, _, err = GenerateDeterministicKeyPairsSeed(nil, 4)
+ require.Equal(t, errors.New("Seed input is empty"), err)
+
+ require.Panics(t, func() {
+ MustGenerateDeterministicKeyPairsSeed(nil, 4)
+ })
+}
+
+func TestDeterministicKeyPairIterator(t *testing.T) {
+ seed := randBytes(t, 32)
+ newSeed, p, s, err := DeterministicKeyPairIterator(seed)
+ require.NoError(t, err)
+ require.NoError(t, p.Verify())
+ require.NoError(t, s.Verify())
+ require.NotEqual(t, seed, newSeed)
+ require.Len(t, newSeed, 32)
+
+ newSeed2, p2, s2 := MustDeterministicKeyPairIterator(seed)
+ require.Equal(t, newSeed, newSeed2)
+ require.Equal(t, p, p2)
+ require.Equal(t, s, s2)
+
+ _, _, _, err = DeterministicKeyPairIterator(nil)
+ require.Equal(t, errors.New("Seed input is empty"), err)
+
+ require.Panics(t, func() {
+ MustDeterministicKeyPairIterator(nil)
+ })
+}
+
+func TestCheckSecKey(t *testing.T) {
_, s := GenerateKeyPair()
- assert.Nil(t, TestSecKey(s))
- assert.NotNil(t, TestSecKey(SecKey{}))
+ require.NoError(t, CheckSecKey(s))
+ require.Error(t, CheckSecKey(SecKey{}))
}
-func TestSecKeyHashTest(t *testing.T) {
+func TestCheckSecKeyHash(t *testing.T) {
_, s := GenerateKeyPair()
h := SumSHA256(randBytes(t, 256))
- assert.Nil(t, TestSecKeyHash(s, h))
- assert.NotNil(t, TestSecKeyHash(SecKey{}, h))
+ require.NoError(t, CheckSecKeyHash(s, h))
+ require.Error(t, CheckSecKeyHash(SecKey{}, h))
}
func TestGenerateDeterministicKeyPairsUsesAllBytes(t *testing.T) {
// Tests that if a seed >128 bits is used, the generator does not ignore bits >128
seed := "property diet little foster provide disagree witness mountain alley weekend kitten general"
- seckeys := GenerateDeterministicKeyPairs([]byte(seed), 3)
- seckeys2 := GenerateDeterministicKeyPairs([]byte(seed[:16]), 3)
+ seckeys := MustGenerateDeterministicKeyPairs([]byte(seed), 3)
+ seckeys2 := MustGenerateDeterministicKeyPairs([]byte(seed[:16]), 3)
require.NotEqual(t, seckeys, seckeys2)
}
+
+func TestPubkey1(t *testing.T) {
+ // This was migrated from coin/coin_test.go
+ a := "02fa939957e9fc52140e180264e621c2576a1bfe781f88792fb315ca3d1786afb8"
+ b, err := hex.DecodeString(a)
+ require.NoError(t, err)
+
+ p, err := NewPubKey(b)
+ require.NoError(t, err)
+ require.NoError(t, p.Verify())
+
+ addr := AddressFromPubKey(p)
+ require.NoError(t, addr.Verify(p))
+}
+
+func TestSecKey1(t *testing.T) {
+ // This was migrated from coin/coin_test.go
+ a := "5a42c0643bdb465d90bf673b99c14f5fa02db71513249d904573d2b8b63d353d"
+ b, err := hex.DecodeString(a)
+ require.NoError(t, err)
+ require.Len(t, b, 32)
+
+ seckey, err := NewSecKey(b)
+ require.NoError(t, err)
+ require.NoError(t, seckey.Verify())
+
+ pubkey, err := PubKeyFromSecKey(seckey)
+ require.NoError(t, err)
+ require.NoError(t, pubkey.Verify())
+
+ addr := AddressFromPubKey(pubkey)
+ require.NoError(t, addr.Verify(pubkey))
+
+ test := []byte("test message")
+ hash := SumSHA256(test)
+ err = CheckSecKeyHash(seckey, hash)
+ require.NoError(t, err)
+}
+
+func TestSecKeyPubKeyNull(t *testing.T) {
+ var pk PubKey
+ require.True(t, pk.Null())
+ pk[0] = 1
+ require.False(t, pk.Null())
+
+ var sk SecKey
+ require.True(t, sk.Null())
+ sk[0] = 1
+ require.False(t, sk.Null())
+
+ sk, err := NewSecKey(randBytes(t, 32))
+ require.NoError(t, err)
+ pk = MustPubKeyFromSecKey(sk)
+
+ require.False(t, sk.Null())
+ require.False(t, pk.Null())
+}
+
+func TestVerifySignatureRecoverPubKey(t *testing.T) {
+ h := MustSHA256FromHex("127e9b0d6b71cecd0363b366413f0f19fcd924ae033513498e7486570ff2a1c8")
+ sig := MustSigFromHex("63c035b0c95d0c5744fc1c0bdf38af02cef2d2f65a8f923732ab44e436f8a491216d9ab5ff795e3144f4daee37077b8b9db54d2ba3a3df8d4992f06bb21f724401")
+
+ err := VerifySignatureRecoverPubKey(sig, h)
+ require.NoError(t, err)
+
+ // Fails with ErrInvalidHashForSig
+ badSigHex := "71f2c01516fe696328e79bcf464eb0db374b63d494f7a307d1e77114f18581d7a81eed5275a9e04a336292dd2fd16977d9bef2a54ea3161d0876603d00c53bc9dd"
+ badSig := MustSigFromHex(badSigHex)
+ err = VerifySignatureRecoverPubKey(badSig, h)
+ require.Equal(t, ErrInvalidHashForSig, err)
+
+ // Fails with ErrInvalidSigPubKeyRecovery
+ badSig = MustSigFromHex("63c035b0c95d0c5744fc1c0bdf39af02cef2d2f65a8f923732ab44e436f8a491216d9ab5ff795e3144f4daee37077b8b9db54d2ba3a3df8d4992f06bb21f724401")
+ err = VerifySignatureRecoverPubKey(badSig, h)
+ require.Equal(t, ErrInvalidSigPubKeyRecovery, err)
+}
+
+func TestHighSPointSigInvalid(t *testing.T) {
+ // Verify that signatures that were generated with forceLowS=false
+ // are not accepted as valid, to avoid a signature malleability case.
+ // Refer to secp256k1go's TestSigForceLowS for the reference test inputs
+
+ h := MustSHA256FromHex("DD72CBF2203C1A55A411EEC4404AF2AFB2FE942C434B23EFE46E9F04DA8433CA")
+
+ // This signature has a high S point (the S point is above the half-order of the curve)
+ sigHexHighS := "8c20a668be1b5a910205de46095023fe4823a3757f4417114168925f28193bffadf317cc256cec28d90d5b2b7e1ce6a45cd5f3b10880ab5f99c389c66177d39a01"
+ s := MustSigFromHex(sigHexHighS)
+ err := VerifySignatureRecoverPubKey(s, h)
+
+ require.Error(t, err)
+ require.Equal(t, "Signature not valid for hash", err.Error())
+
+ // This signature has a low S point (the S point is below the half-order of the curve).
+ // It is equal to forceLowS(sigHighS).
+ sigHexLowS := "8c20a668be1b5a910205de46095023fe4823a3757f4417114168925f28193bff520ce833da9313d726f2a4d481e3195a5dd8e935a6c7f4dc260ed4c66ebe6da700"
+ s2 := MustSigFromHex(sigHexLowS)
+ err = VerifySignatureRecoverPubKey(s2, h)
+ require.NoError(t, err)
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/encoder/benchmark_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/encoder/benchmark_test.go
new file mode 100644
index 0000000..76a20e8
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/encoder/benchmark_test.go
@@ -0,0 +1,33 @@
+package encoder
+
+import "testing"
+
+// benchmarkExample is the same struct used in https://github.com/gz-c/gosercomp benchmarks
+type benchmarkExample struct {
+ ID int32
+ Name string
+ Colors []string
+}
+
+var benchmarkExampleObj = benchmarkExample{
+ ID: 1,
+ Name: "Reds",
+ Colors: []string{"Crimson", "Red", "Ruby", "Maroon"},
+}
+
+func BenchmarkDeserializeRaw(b *testing.B) {
+ byt := Serialize(benchmarkExampleObj)
+ result := &benchmarkExample{}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ DeserializeRaw(byt, result) //nolint:errcheck
+ }
+}
+
+func BenchmarkSerialize(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Serialize(&benchmarkExampleObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go
index 901ef9b..d73b54d 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/encoder/encoder_test.go
@@ -4,7 +4,11 @@ import (
"bytes"
"crypto/rand"
"encoding/hex"
- "log"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "os"
"reflect"
"testing"
@@ -13,27 +17,17 @@ import (
"github.com/skycoin/skycoin/src/cipher"
)
-func randBytes(n int) []byte { // nolint: unparam
+func randBytes(t *testing.T, n uint64) []byte { //nolint:unparam
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
- rand.Read(bytes)
+ _, err := rand.Read(bytes)
+ require.NoError(t, err)
for i, b := range bytes {
bytes[i] = alphanum[b%byte(len(alphanum))]
}
return bytes
}
-/*
-* the file name has to end with _test.go to be picked up as a set of tests by go test
-* the package name has to be the same as in the source file that has to be tested
-* you have to import the package testing
-* all test functions should start with Test to be run as a test
-* the tests will be executed in the same order that they are appear in the source
-* the test function TestXxx functions take a pointer to the type testing.T. You use it to record the test status and also for logging.
-* the signature of the test function should always be func TestXxx ( *testing.T). You can have any combination of alphanumeric characters and the hyphen for the Xxx part, the only constraint that it should not begin with a small alphabet, [a-z].
-* a call to any of the following functions of testing.T within the test code Error, Errorf, FailNow, Fatal, FatalIf will indicate to go test that the test has failed.
- */
-
//Size of= 13
type TestStruct struct {
X int32
@@ -66,88 +60,70 @@ type TestStructWithoutIgnore struct {
K []byte
}
-//func (*B) Fatal
-
-func Test_Encode_1(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
- var t TestStruct
- t.X = 345535
- t.Y = 23432435443
- t.Z = 255
- t.K = []byte("TEST6")
- t.W = true
- t.T = "hello"
- t.U = cipher.PubKey{1, 2, 3, 0, 5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-
- b := Serialize(t)
+func Test_Encode_1(t *testing.T) {
+ var ts TestStruct
+ ts.X = 345535
+ ts.Y = 23432435443
+ ts.Z = 255
+ ts.K = []byte("TEST6")
+ ts.W = true
+ ts.T = "hello"
+ ts.U = cipher.PubKey{1, 2, 3, 0, 5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- var buf bytes.Buffer
- buf.Write(b)
+ b := Serialize(ts)
- var t2 TestStruct
- err := Deserialize(&buf, len(b), &t2)
- if err != nil {
- T.Fatal(err)
- }
+ var ts2 TestStruct
+ n, err := DeserializeRaw(b, &ts2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
- b2 := Serialize(t2)
+ b2 := Serialize(ts2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ c := bytes.Compare(b, b2)
+ require.Equal(t, c, 0)
}
-func Test_Encode_2a(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
- var t TestStruct2
- t.X = 345535
- t.Y = 23432435443
- t.Z = 255
- t.W = false
+func Test_Encode_2a(t *testing.T) {
+ var ts TestStruct2
+ ts.X = 345535
+ ts.Y = 23432435443
+ ts.Z = 255
+ ts.W = false
_tt := []byte("ASDSADFSDFASDFSD")
- for i := 0; i < 8; i++ {
- t.K[i] = _tt[i]
- }
-
- b := Serialize(t)
+ copy(ts.K[:], _tt)
- var buf bytes.Buffer
- buf.Write(b)
+ b := Serialize(ts)
- var t2 TestStruct2
- err := Deserialize(&buf, len(b), &t2)
- if err != nil {
- T.Fatal(err)
- }
+ var ts2 TestStruct2
+ n, err := DeserializeRaw(b, &ts2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
- b2 := Serialize(t2)
+ b2 := Serialize(ts2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ c := bytes.Compare(b, b2)
+ require.Equal(t, c, 0)
}
-func Test_Encode_2b(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
- var t TestStruct2
- t.X = 345535
- t.Y = 23432435443
- t.Z = 255
+func Test_Encode_2b(t *testing.T) {
+ var ts TestStruct2
+ ts.X = 345535
+ ts.Y = 23432435443
+ ts.Z = 255
_tt := []byte("ASDSADFSDFASDFSD")
- for i := 0; i < 8; i++ {
- t.K[i] = _tt[i]
- }
+ copy(ts.K[:], _tt)
- b := Serialize(t)
+ b := Serialize(ts)
- var t2 TestStruct2
- err := DeserializeRaw(b, &t2)
- if err != nil {
- T.Fatal(err)
- }
+ var ts2 TestStruct2
+ n, err := DeserializeRaw(b, &ts2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
- b2 := Serialize(t2)
+ b2 := Serialize(ts2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ c := bytes.Compare(b, b2)
+ require.Equal(t, c, 0)
}
type TestStruct3 struct {
@@ -155,55 +131,44 @@ type TestStruct3 struct {
K []byte
}
-func Test_Encode_3a(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+func Test_Encode_3a(t *testing.T) {
var t1 TestStruct3
t1.X = 345535
- t1.K = randBytes(32)
+ t1.K = randBytes(t, 32)
b := Serialize(t1)
- var buf bytes.Buffer
- buf.Write(b)
-
var t2 TestStruct3
- err := Deserialize(&buf, len(b), &t2)
- if err != nil {
- T.Fatal(err)
- }
+ n, err := DeserializeRaw(b, &t2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
- if t1.X != t2.X || len(t1.K) != len(t2.K) || bytes.Compare(t1.K, t2.K) != 0 {
- T.Fatal()
- }
+ require.False(t, t1.X != t2.X || len(t1.K) != len(t2.K) || !bytes.Equal(t1.K, t2.K))
b2 := Serialize(t2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ c := bytes.Compare(b, b2)
+ require.Equal(t, c, 0)
}
-func Test_Encode_3b(T *testing.T) { //test function starts with "Test" and takes a pointer to type testing.T
+func Test_Encode_3b(t *testing.T) {
var t1 TestStruct3
t1.X = 345535
- t1.K = randBytes(32)
+ t1.K = randBytes(t, 32)
b := Serialize(t1)
var t2 TestStruct3
- err := DeserializeRaw(b, &t2)
- if err != nil {
- T.Fatal(err)
- }
+ n, err := DeserializeRaw(b, &t2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
- if t1.X != t2.X || len(t1.K) != len(t2.K) || bytes.Compare(t1.K, t2.K) != 0 {
- T.Fatal()
- }
+ require.False(t, t1.X != t2.X || len(t1.K) != len(t2.K) || !bytes.Equal(t1.K, t2.K))
b2 := Serialize(t2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ c := bytes.Compare(b, b2)
+ require.Equal(t, c, 0)
}
type TestStruct4 struct {
@@ -216,51 +181,33 @@ type TestStruct5 struct {
A []TestStruct4
}
-func Test_Encode_4(T *testing.T) {
+func Test_Encode_4(t *testing.T) {
var t1 TestStruct5
t1.X = 345535
- const NUM = 8
- t1.A = make([]TestStruct4, NUM)
+ t1.A = make([]TestStruct4, 8)
b := Serialize(t1)
var t2 TestStruct5
- err := DeserializeRaw(b, &t2)
- if err != nil {
- T.Fatal(err)
- }
+ n, err := DeserializeRaw(b, &t2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
- if t1.X != t2.X {
- T.Fatal("TestStruct5.X not equal")
- }
+ require.Equal(t, t1.X, t2.X, "TestStruct5.X not equal")
- if len(t1.A) != len(t2.A) {
- T.Fatal("Slice lengths not equal")
- }
+ require.Equal(t, len(t1.A), len(t2.A), "Slice lengths not equal: %d != %d", len(t1.A), len(t2.A))
for i, ts := range t1.A {
- if ts != t2.A[i] {
- T.Fatal("Slice values not equal")
- }
+ require.Equal(t, ts, t2.A[i], "Slice values not equal")
}
b2 := Serialize(t2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ require.True(t, bytes.Equal(b, b2))
}
-// type TestStruct2 struct {
-// X int32
-// Y int64
-// Z uint8
-// K [8]byte
-// }
-
-func Test_Encode_5(T *testing.T) {
-
+func TestDeserializeRawToValue(t *testing.T) {
var ts TestStruct2
ts.X = 345535
ts.Y = 23432435443
@@ -268,67 +215,81 @@ func Test_Encode_5(T *testing.T) {
b1 := Serialize(ts)
- var t = reflect.TypeOf(ts)
- var v = reflect.New(t) //pointer to type t
+ var tts = reflect.TypeOf(ts)
+ var v = reflect.New(tts) // pointer to type tts
- //New returns a Value representing a pointer to a new zero value for the specified type.
- //That is, the returned Value's Type is PtrTo(t).
-
- _, err := DeserializeRawToValue(b1, v)
- if err != nil {
- T.Fatal(err)
- }
+ n, err := DeserializeRawToValue(b1, v)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b1)), n)
v = reflect.Indirect(v)
- if v.FieldByName("X").Int() != int64(ts.X) {
- T.Fatalf("X not equal")
- }
- if v.FieldByName("Y").Int() != ts.Y {
- T.Fatalf("Y not equal")
- }
- if v.FieldByName("Z").Uint() != uint64(ts.Z) {
- T.Fatalf("Z not equal")
- }
-}
+ require.Equal(t, int64(ts.X), v.FieldByName("X").Int())
+ require.Equal(t, ts.Y, v.FieldByName("Y").Int())
+ require.Equal(t, uint64(ts.Z), v.FieldByName("Z").Uint())
+
+ ss := []string{"foo", "bar", "baz"}
+ expectedLen := (4+3)*3 + 4
+ extraLen := 4
+ b := Serialize(ss)
+ require.Equal(t, expectedLen, len(b))
+
+ // Pad extra bytes onto the array to test that it does not change the decoding
+ b = append(b, make([]byte, extraLen)...)
+
+ var ssd []string
+ n, err = DeserializeRawToValue(b, reflect.ValueOf(&ssd))
+ require.Equal(t, uint64(expectedLen), n)
+ require.NoError(t, err)
-func Test_Encode_IgnoreTagSerialize(T *testing.T) {
- var t TestStructIgnore
- t.X = 345535
- t.Y = 23432435443
- t.Z = 255
- t.K = []byte("TEST6")
+ // Not a pointer
+ _, err = DeserializeRawToValue(b, reflect.ValueOf(ts))
+ require.Equal(t, errors.New("DeserializeRawToValue value must be a ptr, is struct"), err)
- b := Serialize(t)
- var buf bytes.Buffer
- buf.Write(b)
+ // Map is ok
+ m := map[string]int64{"foo": 32, "bar": 64}
+ b = Serialize(m)
+ require.NotEmpty(t, b)
- var t2 TestStructIgnore
- t.X = 0
- t.Y = 0
- t.Z = 0
- t.K = []byte("")
- err := Deserialize(&buf, len(b), &t2)
- if err != nil {
- T.Fatal(err)
- }
+ mm := map[string]int64{}
+ _, err = DeserializeRawToValue(b, reflect.ValueOf(mm))
+ require.NoError(t, err)
+ require.Equal(t, m, mm)
- if t2.Z != 0 {
- T.Fatalf("Z should not deserialize. It is %d", t2.Z)
- }
+ // Map pointer is ok
+ mm = map[string]int64{}
+ _, err = DeserializeRawToValue(b, reflect.ValueOf(&mm))
+ require.NoError(t, err)
+ require.Equal(t, m, mm)
+}
- buf.Reset()
- buf.Write(b)
+func Test_Encode_IgnoreTagSerialize(t *testing.T) {
+ var ts TestStructIgnore
+ ts.X = 345535
+ ts.Y = 23432435443
+ ts.Z = 255
+ ts.K = []byte("TEST6")
- var t3 TestStructWithoutIgnore
- err = Deserialize(&buf, len(b), &t3)
- if err != nil {
- T.Fatal(err)
- }
+ b := Serialize(ts)
- b2 := Serialize(t2)
- if bytes.Compare(b, b2) != 0 {
- T.Fatal()
- }
+ var ts2 TestStructIgnore
+ ts.X = 0
+ ts.Y = 0
+ ts.Z = 0
+ ts.K = []byte("")
+ n, err := DeserializeRaw(b, &ts2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+
+ require.Equal(t, uint8(0), ts2.Z, "Z should not deserialize. It is %d", ts2.Z)
+
+ var ts3 TestStructWithoutIgnore
+ n, err = DeserializeRaw(b, &ts3)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+
+ b2 := Serialize(ts2)
+ c := bytes.Compare(b, b2)
+ require.Equal(t, c, 0)
}
type Contained struct {
@@ -343,7 +304,7 @@ type Container struct {
}
func TestEncodeNestedSlice(t *testing.T) {
- size := 0
+ size := uint64(0)
elems := make([]Contained, 4)
for i := range elems {
elems[i].X = uint32(i)
@@ -354,48 +315,33 @@ func TestEncodeNestedSlice(t *testing.T) {
for j := range elems[i].Bytes {
elems[i].Bytes[j] = uint8(j)
}
- size += 4 + i*1
+ size += 4 + uint64(i*1)
elems[i].Ints = make([]uint16, i)
for j := range elems[i].Ints {
elems[i].Ints[j] = uint16(j)
}
- size += 4 + i*2
+ size += 4 + uint64(i*2)
}
c := Container{elems}
- n, err := datasizeWrite(reflect.ValueOf(c))
- if err != nil {
- t.Fatalf("datasizeWrite failed: %v", err)
- }
- if n != size+4 {
- t.Fatal("Wrong data size")
- }
+ n := datasizeWrite(reflect.ValueOf(c))
+ require.False(t, n != size+4, "Wrong data size")
+
b := Serialize(c)
d := Container{}
- err = DeserializeRaw(b, &d)
- if err != nil {
- t.Fatalf("DeserializeRaw failed: %v", err)
- }
+ n, err := DeserializeRaw(b, &d)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+
for i, e := range d.Elements {
- if c.Elements[i].X != e.X || c.Elements[i].Y != e.Y {
- t.Fatalf("Deserialized x, y to invalid value. "+
- "Expected %d,%d but got %d,%d", c.Elements[i].X,
- c.Elements[i].Y, e.X, e.Y)
- }
- if len(c.Elements[i].Bytes) != len(e.Bytes) {
- t.Fatal("Deserialized Bytes to invalid length")
- }
+ require.Equal(t, c.Elements[i].X, e.X)
+ require.Equal(t, c.Elements[i].Y, e.Y)
+ require.Equal(t, len(c.Elements[i].Bytes), len(e.Bytes))
for j, b := range c.Elements[i].Bytes {
- if c.Elements[i].Bytes[j] != b {
- t.Fatal("Deserialized to invalid value")
- }
- }
- if len(c.Elements[i].Ints) != len(e.Ints) {
- t.Fatal("Deserialized Ints to invalid length")
+ require.Equal(t, c.Elements[i].Bytes[j], b)
}
+ require.Equal(t, len(c.Elements[i].Ints), len(e.Ints))
for j, b := range c.Elements[i].Ints {
- if c.Elements[i].Ints[j] != b {
- t.Fatal("Deserialized Ints to invalid value")
- }
+ require.Equal(t, c.Elements[i].Ints[j], b)
}
}
}
@@ -407,21 +353,15 @@ type Array struct {
func TestDecodeNotEnoughLength(t *testing.T) {
b := make([]byte, 2)
var d Array
- err := DeserializeRaw(b, &d)
- if err == nil {
- t.Fatal("Expected error")
- } else if err.Error() != "Deserialization failed" {
- t.Fatalf("Expected different error, but got %s", err.Error())
- }
+ _, err := DeserializeRaw(b, &d)
+ require.Error(t, err)
+ require.Equal(t, ErrBufferUnderflow, err)
// Test with slice
thing := make([]int, 3)
- err = DeserializeRaw(b, thing)
- if err == nil {
- t.Fatal("Expected error")
- } else if err.Error() != "Deserialization failed" {
- t.Fatal("Expected different error")
- }
+ _, err = DeserializeRaw(b, &thing)
+ require.Error(t, err)
+ require.Equal(t, ErrBufferUnderflow, err)
}
func TestFlattenMultidimensionalBytes(t *testing.T) {
@@ -433,14 +373,10 @@ func TestFlattenMultidimensionalBytes(t *testing.T) {
}
b := Serialize(data)
- expect := 16 * 16
- if len(b) != expect {
- t.Fatalf("Expected %d bytes, decoded to %d bytes", expect, len(b))
- }
-
+ require.Equal(t, 16*16, len(b))
}
-func TestMultiArrays(T *testing.T) {
+func TestMultiArrays(t *testing.T) {
var data [16][16]byte
for i := 0; i < 16; i++ {
for j := 0; j < 16; j++ {
@@ -452,126 +388,191 @@ func TestMultiArrays(T *testing.T) {
var data2 [16][16]byte
- err := DeserializeRaw(b, &data2)
- if err != nil {
- T.Fatal(err)
- }
+ n, err := DeserializeRaw(b, &data2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
for i := 0; i < 16; i++ {
for j := 0; j < 16; j++ {
- if data[i][j] != data2[i][j] {
- T.Fatalf("failed round trip test")
- }
+ require.Equal(t, data[i][j], data2[i][j])
}
}
b2 := Serialize(data2)
- if !bytes.Equal(b, b2) {
- T.Fatalf("Failed round trip test")
- }
-
- if len(b) != 256 {
- T.Fatalf("decoded to wrong byte length")
- }
+ require.True(t, bytes.Equal(b, b2))
+ require.Equal(t, 256, len(b))
}
-func TestSerializeAtomic(t *testing.T) {
-
+func TestDeserializeAtomic(t *testing.T) {
var sp uint64 = 0x000C8A9E1809F720
b := SerializeAtomic(sp)
var i uint64
- DeserializeAtomic(b, &i)
-
- if i != sp {
- t.Fatal("round trip atomic fail")
- }
+ n, err := DeserializeAtomic(b, &i)
+ require.Equal(t, uint64(len(b)), n)
+ require.NoError(t, err)
+ require.Equal(t, sp, i)
}
-func TestPushPop(t *testing.T) {
- var sp uint64 = 0x000C8A9E1809F720
+func TestSerializeDeserializeAtomic(t *testing.T) {
+ var di64 int64
+ n, err := DeserializeAtomic(nil, &di64)
+ require.Equal(t, uint64(0), n)
+ require.Equal(t, ErrBufferUnderflow, err)
+
+ b := false
+ d := SerializeAtomic(b)
+ var bb bool
+ n, err = DeserializeAtomic(d, &bb)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), n)
+ require.Equal(t, b, bb)
- var d [8]byte
- EncodeInt(d[0:8], sp)
+ b = true
+ bb = false
+ d = SerializeAtomic(b)
+ n, err = DeserializeAtomic(d, &bb)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), n)
+ require.Equal(t, b, bb)
- //fmt.Printf("d= %X \n", d[:])
+ var byt byte = 0xE4
+ d = SerializeAtomic(byt)
+ var bytb byte
+ n, err = DeserializeAtomic(d, &bytb)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), n)
+ require.Equal(t, byt, bytb)
- var ti uint64
- DecodeInt(d[0:8], &ti)
+ var u8 uint8 = 0xF7
+ d = SerializeAtomic(u8)
+ var u8b uint8
+ n, err = DeserializeAtomic(d, &u8b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), n)
+ require.Equal(t, u8, u8b)
- if ti != sp {
- //fmt.Printf("sp= %X ti= %X \n", sp,ti)
- t.Error("roundtrip failed")
- }
+ var u16 uint16 = 0xF720
+ d = SerializeAtomic(u16)
+ var u16b uint16
+ n, err = DeserializeAtomic(d, &u16b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), n)
+ require.Equal(t, u16, u16b)
+
+ var u32 uint32 = 0x1809F720
+ d = SerializeAtomic(u32)
+ var u32b uint32
+ n, err = DeserializeAtomic(d, &u32b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(4), n)
+ require.Equal(t, u32, u32b)
+
+ var u64 uint64 = 0x000C8A9E1809F720
+ d = SerializeAtomic(u64)
+ var u64b uint64
+ n, err = DeserializeAtomic(d, &u64b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(8), n)
+ require.Equal(t, u64, u64b)
+
+ var i8 int8 = 0x69
+ d = SerializeAtomic(i8)
+ var i8b int8
+ n, err = DeserializeAtomic(d, &i8b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), n)
+ require.Equal(t, i8, i8b)
+
+ var i16 int16 = 0x6920
+ d = SerializeAtomic(i16)
+ var i16b int16
+ n, err = DeserializeAtomic(d, &i16b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), n)
+ require.Equal(t, i16, i16b)
+
+ var i32 int32 = 0x1809F720
+ d = SerializeAtomic(i32)
+ var i32b int32
+ n, err = DeserializeAtomic(d, &i32b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(4), n)
+ require.Equal(t, i32, i32b)
+
+ var i64 int64 = 0x000C8A9E1809F720
+ d = SerializeAtomic(i64)
+ var i64b int64
+ n, err = DeserializeAtomic(d, &i64b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(8), n)
+ require.Equal(t, i64, i64b)
}
type TestStruct5a struct {
Test uint64
}
-func TestPanicTest(t *testing.T) {
-
- defer func() {
- if r := recover(); r == nil {
- t.Error("EncodeInt Did not panic")
- }
- }()
+func TestSerializeAtomicPanics(t *testing.T) {
+ var x float32
+ require.PanicsWithValue(t, "SerializeAtomic unhandled type", func() {
+ SerializeAtomic(x)
+ })
- log.Panic()
+ var tst TestStruct5a
+ require.PanicsWithValue(t, "SerializeAtomic unhandled type", func() {
+ SerializeAtomic(&tst)
+ })
+ require.PanicsWithValue(t, "SerializeAtomic unhandled type", func() {
+ SerializeAtomic(tst)
+ })
}
-func TestPushPopNegative(t *testing.T) {
+func TestDeserializeAtomicPanics(t *testing.T) {
+ var y int8
+ require.PanicsWithValue(t, "DeserializeAtomic unhandled type", func() {
+ _, _ = DeserializeAtomic(nil, y) //nolint:errcheck
+ })
- defer func() {
- if r := recover(); r == nil {
- t.Error("EncodeInt Did not panic on invalid input type")
- }
- }()
+ var x float32
+ require.PanicsWithValue(t, "DeserializeAtomic unhandled type", func() {
+ _, _ = DeserializeAtomic(nil, &x) //nolint:errcheck
+ })
var tst TestStruct5a
- //var sp uint64 = 0x000C8A9E1809F720
- var d [8]byte
- EncodeInt(d[0:8], &tst) //attemp to encode invalid type
-
+ d := make([]byte, 8)
+ require.PanicsWithValue(t, "DeserializeAtomic unhandled type", func() {
+ _, _ = DeserializeAtomic(d, &tst) //nolint:errcheck
+ })
}
func TestByteArray(t *testing.T) {
-
tstr := "7105a46cb4c2810f0c916e0bb4b4e4ef834ad42040c471b42c96d356a9fd1b21"
d, err := hex.DecodeString(tstr)
- if err != nil {
- t.Fail()
- }
+ require.NoError(t, err)
- buff := Serialize(d)
+ buf := Serialize(d)
var buff2 [32]byte
- copy(buff2[0:32], buff[0:32])
-
- if len(buff2) != 32 {
- t.Errorf("incorrect serialization length for fixed sized arrays: %d byte fixed sized array serialized to %d bytes \n", len(d), len(buff2))
- }
+ copy(buff2[0:32], buf[0:32])
+ require.Equal(t, 32, len(buff2), "incorrect serialization length for fixed sized arrays: %d byte fixed sized array serialized to %d bytes", len(d), len(buff2))
}
func TestEncodeDictInt2Int(t *testing.T) {
m1 := map[uint8]uint64{0: 0, 1: 1, 2: 2}
- buff := Serialize(m1)
- if len(buff) != 4 /* Length */ +(1+8)*len(m1) /* 1b key + 8b value per entry */ {
- t.Fail()
- }
+ buf := Serialize(m1)
+ require.Equal(t, len(buf), 4+(1+8)*len(m1))
m2 := make(map[uint8]uint64)
- if DeserializeRaw(buff, m2) != nil {
- t.Fail()
- }
- if len(m1) != len(m2) {
- t.Errorf("Expected length %d but got %d", len(m1), len(m2))
- }
+ n, err := DeserializeRaw(buf, m2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(buf)), n)
+
+ require.Equal(t, len(m1), len(m2), "Expected length %d but got %d", len(m1), len(m2))
+
for key := range m1 {
- if m1[key] != m2[key] {
- t.Errorf("Expected value %d for key %d but got %d", m1[key], key, m2[key])
- }
+ require.Equal(t, m1[key], m2[key], "Expected value %d for key %d but got %d", m1[key], key, m2[key])
}
}
@@ -584,48 +585,45 @@ type TestStructWithDict struct {
func TestEncodeDictNested(t *testing.T) {
s1 := TestStructWithDict{
- 0x01234567,
- 0x0123456789ABCDEF,
- map[uint8]TestStruct{
+ X: 0x01234567,
+ Y: 0x0123456789ABCDEF,
+ M: map[uint8]TestStruct{
0x01: TestStruct{
- 0x01234567,
- 0x0123456789ABCDEF,
- 0x01,
- []byte{0, 1, 2},
- true,
- "ab",
- cipher.PubKey{
+ X: 0x01234567,
+ Y: 0x0123456789ABCDEF,
+ Z: 0x01,
+ K: []byte{0, 1, 2},
+ W: true,
+ T: "ab",
+ U: cipher.PubKey{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
},
},
0x23: TestStruct{
- 0x01234567,
- 0x0123456789ABCDEF,
- 0x01,
- []byte{0, 1, 2},
- true,
- "cd",
- cipher.PubKey{
+ X: 0x01234567,
+ Y: 0x0123456789ABCDEF,
+ Z: 0x01,
+ K: []byte{0, 1, 2},
+ W: true,
+ T: "cd",
+ U: cipher.PubKey{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
},
},
},
- []byte{0, 1, 2, 3, 4},
- }
- buff := Serialize(s1)
- if len(buff) == 0 {
- t.Fail()
+ K: []byte{0, 1, 2, 3, 4},
}
+ buf := Serialize(s1)
+ require.NotEmpty(t, buf)
s2 := TestStructWithDict{}
- if DeserializeRaw(buff, &s2) != nil {
- t.Fail()
- }
- if !reflect.DeepEqual(s1, s2) {
- t.Errorf("Expected %v but got %v", s1, s2)
- }
+ n, err := DeserializeRaw(buf, &s2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(buf)), n)
+
+ require.True(t, reflect.DeepEqual(s1, s2), "Expected %v but got %v", s1, s2)
}
func TestEncodeDictString2Int64(t *testing.T) {
@@ -637,8 +635,9 @@ func TestEncodeDictString2Int64(t *testing.T) {
b := Serialize(v)
v2 := make(map[string]int64)
- err := DeserializeRaw(b, &v2)
+ n, err := DeserializeRaw(b, &v2)
require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
require.Equal(t, v, v2)
}
@@ -679,8 +678,9 @@ func TestOmitEmptyString(t *testing.T) {
}
var y omitString
- err := DeserializeRaw(b, &y)
+ n, err := DeserializeRaw(b, &y)
require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
require.Equal(t, tc.input, y)
})
@@ -733,8 +733,9 @@ func TestOmitEmptySlice(t *testing.T) {
}
var y omitSlice
- err := DeserializeRaw(b, &y)
+ n, err := DeserializeRaw(b, &y)
require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
expect := tc.expect
if expect == nil {
@@ -792,8 +793,9 @@ func TestOmitEmptyMap(t *testing.T) {
}
var y omitMap
- err := DeserializeRaw(b, &y)
+ n, err := DeserializeRaw(b, &y)
require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
expect := tc.expect
if expect == nil {
@@ -872,8 +874,9 @@ func TestOmitEmptyMixedFinalByte(t *testing.T) {
require.NotEmpty(t, b)
var y omitMixed
- err := DeserializeRaw(b, &y)
+ n, err := DeserializeRaw(b, &y)
require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
require.Equal(t, tc.expect, y)
})
@@ -892,3 +895,569 @@ func TestOmitEmptyFinalFieldOnly(t *testing.T) {
Serialize(b)
})
}
+
+func TestTagOmitempty(t *testing.T) {
+ cases := []struct {
+ tag string
+ omitempty bool
+ }{
+ {
+ tag: "foo,omitempty",
+ omitempty: true,
+ },
+ {
+ tag: "omitempty",
+ omitempty: false,
+ },
+ {
+ tag: ",omitempty",
+ omitempty: true,
+ },
+ {
+ tag: "",
+ omitempty: false,
+ },
+ {
+ tag: "-",
+ omitempty: false,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.tag, func(t *testing.T) {
+ omitempty := TagOmitempty(tc.tag)
+ require.Equal(t, tc.omitempty, omitempty)
+ })
+ }
+}
+
+type primitiveInts struct {
+ A int8
+ B uint8
+ C int16
+ D uint16
+ E int32
+ F uint32
+ G int64
+ H uint64
+}
+
+func TestPrimitiveInts(t *testing.T) {
+ cases := []struct {
+ name string
+ c primitiveInts
+ }{
+ {
+ name: "all maximums",
+ c: primitiveInts{
+ A: math.MaxInt8,
+ B: math.MaxUint8,
+ C: math.MaxInt16,
+ D: math.MaxUint16,
+ E: math.MaxInt32,
+ F: math.MaxUint32,
+ G: math.MaxInt64,
+ H: math.MaxUint64,
+ },
+ },
+ {
+ name: "negative integers",
+ c: primitiveInts{
+ A: -99,
+ C: -99,
+ E: -99,
+ G: -99,
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ bytes := Serialize(tc.c)
+ require.NotEmpty(t, bytes)
+
+ var obj primitiveInts
+ n, err := DeserializeRaw(bytes, &obj)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(bytes)), n)
+ require.Equal(t, tc.c, obj)
+ })
+ }
+}
+
+type hasEveryType struct {
+ A int8
+ B int16
+ C int32
+ D int64
+ E uint8
+ F uint16
+ G uint32
+ H uint64
+ I bool
+ J byte
+ K string
+ L []byte // slice, byte type
+ M []int64 // slice, non-byte type
+ N [3]byte // array, byte type
+ O [3]int64 // array, non-byte type
+ P struct {
+ A int8
+ B uint16
+ } // struct
+ Q map[string]string // map
+ R float32
+ S float64
+}
+
+func TestEncodeStable(t *testing.T) {
+ // Tests encoding against previously encoded data on disk to verify
+ // that encoding results have not changed
+ update := false
+
+ x := hasEveryType{
+ A: -127,
+ B: math.MaxInt16,
+ C: math.MaxInt32,
+ D: math.MaxInt64,
+ E: math.MaxInt8 + 1,
+ F: math.MaxInt16 + 1,
+ G: math.MaxInt32 + 1,
+ H: math.MaxInt64 + 1,
+ I: true,
+ J: byte(128),
+ K: "foo",
+ L: []byte("bar"),
+ M: []int64{math.MaxInt64, math.MaxInt64 / 2, -10000},
+ N: [3]byte{'b', 'a', 'z'},
+ O: [3]int64{math.MaxInt64, math.MaxInt64 / 2, -10000},
+ P: struct {
+ A int8
+ B uint16
+ }{
+ A: -127,
+ B: math.MaxUint16,
+ },
+ Q: map[string]string{"foo": "bar"},
+ R: float32(123.45),
+ S: float64(123.45),
+ }
+
+ goldenFile := "testdata/encode-every-type.golden"
+
+ if update {
+ f, err := os.Create(goldenFile)
+ require.NoError(t, err)
+ defer f.Close()
+
+ b := Serialize(x)
+ _, err = f.Write(b)
+ require.NoError(t, err)
+ return
+ }
+
+ f, err := os.Open(goldenFile)
+ require.NoError(t, err)
+ defer f.Close()
+
+ d, err := ioutil.ReadAll(f)
+ require.NoError(t, err)
+
+ var y hasEveryType
+ n, err := DeserializeRaw(d, &y)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(d)), n)
+ require.Equal(t, x, y)
+
+ b := Serialize(x)
+ require.Equal(t, len(d), len(b))
+ require.Equal(t, d, b)
+}
+
+func TestEncodeByteSlice(t *testing.T) {
+ type foo struct {
+ W int8
+ X []byte
+ Y int8 // these are added to make sure extra fields don't interact with the byte encoding
+ }
+
+ f := foo{
+ W: 1,
+ X: []byte("abc"),
+ Y: 2,
+ }
+
+ expect := []byte{1, 3, 0, 0, 0, 97, 98, 99, 2}
+
+ b := Serialize(f)
+ require.Equal(t, expect, b)
+}
+
+func TestEncodeByteArray(t *testing.T) {
+ type foo struct {
+ W int8
+ X [3]byte
+ Y int8 // these are added to make sure extra fields don't interact with the byte encoding
+ }
+
+ f := foo{
+ W: 1,
+ X: [3]byte{'a', 'b', 'c'},
+ Y: 2,
+ }
+
+ expect := []byte{1, 97, 98, 99, 2}
+
+ b := Serialize(f)
+ require.Equal(t, expect, b)
+}
+
+func TestEncodeEmptySlice(t *testing.T) {
+ // Decoding an empty slice should not allocate
+ type foo struct {
+ X []byte
+ Y []int64
+ }
+
+ f := &foo{}
+ b := Serialize(f)
+
+ var g foo
+ n, err := DeserializeRaw(b, &g)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+ require.Nil(t, g.X)
+ require.Nil(t, g.Y)
+}
+
+func TestRandomGarbage(t *testing.T) {
+ // Basic fuzz test to check for panics, deserializes random data
+
+ // initialize the struct with data in the variable sized fields
+ x := hasEveryType{
+ K: "string",
+ L: []byte("bar"),
+ M: []int64{math.MaxInt64, math.MaxInt64 / 2, -10000},
+ Q: map[string]string{"foo": "bar", "cat": "dog"},
+ }
+
+ size := datasizeWrite(reflect.ValueOf(x))
+
+ var y hasEveryType
+ for j := 0; j < 100; j++ {
+ for i := uint64(0); i < size*2; i++ {
+ b := randBytes(t, i)
+ _, _ = DeserializeRaw(b, &y) //nolint:errcheck
+ }
+ }
+
+ for i := 0; i < 10000; i++ {
+ b := randBytes(t, size)
+ _, _ = DeserializeRaw(b, &y) //nolint:errcheck
+ }
+}
+
+func TestDeserializeRawBufferRemains(t *testing.T) {
+ x := hasEveryType{}
+ b := Serialize(x)
+ require.NotEmpty(t, b)
+
+ b = append(b, make([]byte, 3)...)
+ var y hasEveryType
+ n, err := DeserializeRaw(b, &y)
+ require.NoError(t, err)
+ require.NotEmpty(t, n)
+ require.True(t, uint64(len(b)) > n)
+}
+
+func TestDeserializeRawNotPointer(t *testing.T) {
+ x := hasEveryType{
+ E: math.MaxInt8 + 1,
+ F: math.MaxInt16 + 1,
+ G: math.MaxInt32 + 1,
+ H: math.MaxInt64 + 1,
+ }
+ b := Serialize(x)
+ require.NotEmpty(t, b)
+
+ var y hasEveryType
+ n, err := DeserializeRaw(b, y)
+ require.Equal(t, errors.New("DeserializeRaw value must be a ptr, is struct"), err)
+ require.Empty(t, n)
+
+ a := []string{"foo", "bar", "baz"}
+ b = Serialize(a)
+ require.NotEmpty(t, b)
+
+ aa := make([]string, 0)
+ n, err = DeserializeRaw(b, aa)
+ require.Equal(t, errors.New("DeserializeRaw value must be a ptr, is slice"), err)
+ require.Empty(t, n)
+
+ m := map[string]int64{"foo": 32, "bar": 64}
+ b = Serialize(m)
+ require.NotEmpty(t, b)
+}
+
+func TestDeserializeMaxLenExceeded(t *testing.T) {
+ // maxlen for strings
+ type Foo struct {
+ X string `enc:",maxlen=2"`
+ }
+
+ b := Serialize(Foo{X: "foo"})
+ require.NotEmpty(t, b)
+
+ var f Foo
+ n, err := DeserializeRaw(b, &f)
+ require.Equal(t, ErrMaxLenExceeded, err)
+ require.Empty(t, n)
+
+ g := Foo{X: "fo"}
+ b = Serialize(g)
+ require.NotEmpty(t, b)
+
+ f = Foo{}
+ n, err = DeserializeRaw(b, &f)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+ require.Equal(t, g, f)
+
+ // maxlen for slices
+ type Bar struct {
+ X []string `enc:",maxlen=2"`
+ }
+
+ b = Serialize(Bar{X: []string{"f", "o", "o"}})
+ require.NotEmpty(t, b)
+
+ var k Bar
+ n, err = DeserializeRaw(b, &k)
+ require.Equal(t, ErrMaxLenExceeded, err)
+ require.Empty(t, n)
+
+ c := Bar{X: []string{"f", "o"}}
+ b = Serialize(c)
+ require.NotEmpty(t, b)
+
+ k = Bar{}
+ n, err = DeserializeRaw(b, &k)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+ require.Equal(t, c, k)
+
+ // Invalid maxlen value panics
+ type Baz struct {
+ X string `enc:",maxlen=foo"`
+ }
+
+ b = Serialize(Baz{X: "foo"})
+ require.NotEmpty(t, b)
+
+ var z Baz
+ require.Panics(t, func() {
+ _, _ = DeserializeRaw(b, &z) //nolint:errcheck
+ })
+
+ // maxlen for final omitempty byte array
+ type Car struct {
+ X string
+ Y []byte `enc:",omitempty,maxlen=2"`
+ }
+
+ car := Car{
+ X: "foo",
+ Y: []byte("foo"),
+ }
+ b = Serialize(car)
+ require.NotEmpty(t, b)
+
+ var w Car
+ n, err = DeserializeRaw(b, &w)
+ require.Equal(t, ErrMaxLenExceeded, err)
+ require.Empty(t, n)
+
+ v := Car{
+ X: "foo",
+ Y: []byte("fo"),
+ }
+ b = Serialize(v)
+ require.NotEmpty(t, b)
+
+ w = Car{}
+ n, err = DeserializeRaw(b, &w)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(b)), n)
+ require.Equal(t, v, w)
+}
+
+func TestSerializeString(t *testing.T) {
+ cases := []struct {
+ s string
+ x []byte
+ }{
+ {
+ s: "",
+ x: []byte{0, 0, 0, 0},
+ },
+ {
+ s: "foo",
+ x: []byte{3, 0, 0, 0, 'f', 'o', 'o'},
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.s, func(t *testing.T) {
+ require.Equal(t, tc.x, SerializeString(tc.s))
+ })
+ }
+}
+
+func TestDeserializeString(t *testing.T) {
+ cases := []struct {
+ s string
+ x []byte
+ n uint64
+ maxLen int
+ err error
+ }{
+ {
+ s: "",
+ x: []byte{0, 0, 0, 0},
+ n: 4,
+ },
+ {
+ s: "foo",
+ x: []byte{3, 0, 0, 0, 'f', 'o', 'o'},
+ n: 7,
+ },
+ {
+ x: []byte{3, 0, 0},
+ err: ErrBufferUnderflow,
+ },
+ {
+ x: nil,
+ err: ErrBufferUnderflow,
+ },
+ {
+ x: []byte{3, 0, 0, 0, 'f'},
+ err: ErrBufferUnderflow,
+ },
+ {
+ s: "foo",
+ x: []byte{3, 0, 0, 0, 'f', 'o', 'o', 'x'},
+ n: 7,
+ },
+ {
+ s: "foo",
+ x: []byte{3, 0, 0, 0, 'f', 'o', 'o', 'x'},
+ maxLen: 2,
+ err: ErrMaxLenExceeded,
+ },
+ {
+ s: "foo",
+ x: []byte{3, 0, 0, 0, 'f', 'o', 'o', 'x'},
+ maxLen: 3,
+ n: 7,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprintf("s=%s err=%v", tc.s, tc.err), func(t *testing.T) {
+ s, n, err := DeserializeString(tc.x, tc.maxLen)
+ if tc.err != nil {
+ require.Equal(t, tc.err, err)
+ require.Equal(t, tc.n, n)
+ return
+ }
+
+ require.NoError(t, err)
+ require.Equal(t, tc.s, s)
+ require.Equal(t, tc.n, n)
+ })
+ }
+}
+
+func TestSerializeUint32(t *testing.T) {
+ cases := []struct {
+ x uint32
+ }{
+ {
+ x: 0,
+ },
+ {
+ x: 1,
+ },
+ {
+ x: 0xFF,
+ },
+ {
+ x: 0xFFFF,
+ },
+ {
+ x: 0xFFFFFF,
+ },
+ {
+ x: 0xFFFFFFFF,
+ },
+ {
+ x: math.MaxUint32,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprint(tc.x), func(t *testing.T) {
+ b := SerializeUint32(tc.x)
+ y, n, err := DeserializeUint32(b)
+ require.NoError(t, err)
+ require.Equal(t, uint64(4), n)
+ require.Equal(t, tc.x, y)
+ })
+ }
+
+ _, _, err := DeserializeUint32(make([]byte, 3))
+ require.Equal(t, ErrBufferUnderflow, err)
+
+ y, n, err := DeserializeUint32([]byte{1, 0, 0, 0, 0})
+ require.NoError(t, err)
+ require.Equal(t, uint64(4), n)
+ require.Equal(t, uint32(1), y)
+}
+
+type BoolStruct struct {
+ B bool
+}
+
+func TestEncodeBool(t *testing.T) {
+ bt := BoolStruct{
+ B: true,
+ }
+ bf := BoolStruct{
+ B: false,
+ }
+
+ buf := Serialize(bt)
+ require.Equal(t, 1, len(buf))
+ require.Equal(t, byte(1), buf[0])
+
+ var bb BoolStruct
+ n, err := DeserializeRaw(buf, &bb)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(buf)), n)
+ require.True(t, bb.B)
+
+ buf = Serialize(bf)
+ require.Equal(t, 1, len(buf))
+ require.Equal(t, byte(0), buf[0])
+
+ require.True(t, bb.B)
+ n, err = DeserializeRaw(buf, &bb)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(buf)), n)
+ require.False(t, bb.B)
+
+ buf = []byte{2}
+ n, err = DeserializeRaw(buf, &bb)
+ require.Equal(t, ErrInvalidBool, err)
+ require.Empty(t, n)
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go
index d57be0f..6e66910 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/hash_test.go
@@ -5,101 +5,175 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
+ "errors"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/skycoin/skycoin/src/cipher/ripemd160"
)
-func freshSumRipemd160(b []byte) Ripemd160 {
+func freshSumRipemd160(t *testing.T, b []byte) Ripemd160 {
sh := ripemd160.New()
- sh.Write(b)
+ _, err := sh.Write(b)
+ require.NoError(t, err)
h := Ripemd160{}
- h.Set(sh.Sum(nil))
+ h.MustSet(sh.Sum(nil))
return h
}
-func freshSumSHA256(b []byte) SHA256 {
+func freshSumSHA256(t *testing.T, b []byte) SHA256 {
sh := sha256.New()
- sh.Write(b)
+ _, err := sh.Write(b)
+ require.NoError(t, err)
h := SHA256{}
- h.Set(sh.Sum(nil))
+ h.MustSet(sh.Sum(nil))
return h
}
func randBytes(t *testing.T, n int) []byte {
b := make([]byte, n)
x, err := rand.Read(b)
- assert.Equal(t, n, x)
- assert.Nil(t, err)
+ require.Equal(t, n, x)
+ require.Nil(t, err)
return b
}
func TestHashRipemd160(t *testing.T) {
- assert.NotPanics(t, func() { HashRipemd160(randBytes(t, 128)) })
+ require.NotPanics(t, func() { HashRipemd160(randBytes(t, 128)) })
r := HashRipemd160(randBytes(t, 160))
- assert.NotEqual(t, r, Ripemd160{})
+ require.NotEqual(t, r, Ripemd160{})
// 2nd hash should not be affected by previous
b := randBytes(t, 256)
r2 := HashRipemd160(b)
- assert.NotEqual(t, r2, Ripemd160{})
- assert.Equal(t, r2, freshSumRipemd160(b))
+ require.NotEqual(t, r2, Ripemd160{})
+ require.Equal(t, r2, freshSumRipemd160(t, b))
}
-func TestRipemd160Set(t *testing.T) {
+func TestRipemd160MustSet(t *testing.T) {
h := Ripemd160{}
- assert.Panics(t, func() {
- h.Set(randBytes(t, 21))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 21))
})
- assert.Panics(t, func() {
- h.Set(randBytes(t, 100))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 100))
})
- assert.Panics(t, func() {
- h.Set(randBytes(t, 19))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 19))
})
- assert.Panics(t, func() {
- h.Set(randBytes(t, 0))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 0))
})
- assert.NotPanics(t, func() {
- h.Set(randBytes(t, 20))
+ require.NotPanics(t, func() {
+ h.MustSet(randBytes(t, 20))
})
b := randBytes(t, 20)
- h.Set(b)
- assert.True(t, bytes.Equal(h[:], b))
+ h.MustSet(b)
+ require.True(t, bytes.Equal(h[:], b))
}
-func TestSHA256Set(t *testing.T) {
+func TestRipemd160Set(t *testing.T) {
+ h := Ripemd160{}
+ err := h.Set(randBytes(t, 21))
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+ err = h.Set(randBytes(t, 100))
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+ err = h.Set(randBytes(t, 19))
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+ err = h.Set(randBytes(t, 0))
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+
+ b := randBytes(t, 20)
+ err = h.Set(b)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(h[:], b))
+}
+
+func TestSHA256MustSet(t *testing.T) {
h := SHA256{}
- assert.Panics(t, func() {
- h.Set(randBytes(t, 33))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 33))
})
- assert.Panics(t, func() {
- h.Set(randBytes(t, 100))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 100))
})
- assert.Panics(t, func() {
- h.Set(randBytes(t, 31))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 31))
})
- assert.Panics(t, func() {
- h.Set(randBytes(t, 0))
+ require.Panics(t, func() {
+ h.MustSet(randBytes(t, 0))
})
- assert.NotPanics(t, func() {
- h.Set(randBytes(t, 32))
+ require.NotPanics(t, func() {
+ h.MustSet(randBytes(t, 32))
})
b := randBytes(t, 32)
- h.Set(b)
- assert.True(t, bytes.Equal(h[:], b))
+ h.MustSet(b)
+ require.True(t, bytes.Equal(h[:], b))
+}
+
+func TestRipemd160FromBytes(t *testing.T) {
+ b := randBytes(t, 20)
+ h, err := Ripemd160FromBytes(b)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(b[:], h[:]))
+
+ b = randBytes(t, 19)
+ _, err = Ripemd160FromBytes(b)
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+
+ b = randBytes(t, 21)
+ _, err = Ripemd160FromBytes(b)
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+
+ _, err = Ripemd160FromBytes(nil)
+ require.Equal(t, errors.New("Invalid ripemd160 length"), err)
+}
+
+func TestMustRipemd160FromBytes(t *testing.T) {
+ b := randBytes(t, 20)
+ h := MustRipemd160FromBytes(b)
+ require.True(t, bytes.Equal(b[:], h[:]))
+
+ b = randBytes(t, 19)
+ require.Panics(t, func() {
+ MustRipemd160FromBytes(b)
+ })
+
+ b = randBytes(t, 21)
+ require.Panics(t, func() {
+ MustRipemd160FromBytes(b)
+ })
+
+ require.Panics(t, func() {
+ MustRipemd160FromBytes(nil)
+ })
+}
+
+func TestSHA256Set(t *testing.T) {
+ h := SHA256{}
+ err := h.Set(randBytes(t, 33))
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+ err = h.Set(randBytes(t, 100))
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+ err = h.Set(randBytes(t, 31))
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+ err = h.Set(randBytes(t, 0))
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+
+ b := randBytes(t, 32)
+ err = h.Set(b)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(h[:], b))
}
func TestSHA256Hex(t *testing.T) {
h := SHA256{}
- h.Set(randBytes(t, 32))
+ h.MustSet(randBytes(t, 32))
s := h.Hex()
h2, err := SHA256FromHex(s)
- assert.Nil(t, err)
- assert.Equal(t, h, h2)
- assert.Equal(t, h2.Hex(), s)
+ require.Nil(t, err)
+ require.Equal(t, h, h2)
+ require.Equal(t, h2.Hex(), s)
}
func TestSHA256KnownValue(t *testing.T) {
@@ -123,67 +197,95 @@ func TestSHA256KnownValue(t *testing.T) {
},
}
for _, io := range vals {
- assert.Equal(t, io.output, SumSHA256([]byte(io.input)).Hex())
+ require.Equal(t, io.output, SumSHA256([]byte(io.input)).Hex())
}
}
func TestSumSHA256(t *testing.T) {
b := randBytes(t, 256)
h1 := SumSHA256(b)
- assert.NotEqual(t, h1, SHA256{})
+ require.NotEqual(t, h1, SHA256{})
// A second call to Sum should not be influenced by the original
c := randBytes(t, 256)
h2 := SumSHA256(c)
- assert.NotEqual(t, h2, SHA256{})
- assert.Equal(t, h2, freshSumSHA256(c))
+ require.NotEqual(t, h2, SHA256{})
+ require.Equal(t, h2, freshSumSHA256(t, c))
}
func TestSHA256FromHex(t *testing.T) {
// Invalid hex hash
_, err := SHA256FromHex("cawcd")
- assert.NotNil(t, err)
+ require.NotNil(t, err)
// Truncated hex hash
h := SumSHA256(randBytes(t, 128))
_, err = SHA256FromHex(hex.EncodeToString(h[:len(h)/2]))
- assert.NotNil(t, err)
+ require.NotNil(t, err)
// Valid hex hash
h2, err := SHA256FromHex(hex.EncodeToString(h[:]))
- assert.Equal(t, h, h2)
- assert.Nil(t, err)
+ require.Equal(t, h, h2)
+ require.Nil(t, err)
}
func TestMustSHA256FromHex(t *testing.T) {
// Invalid hex hash
- assert.Panics(t, func() { MustSHA256FromHex("cawcd") })
+ require.Panics(t, func() { MustSHA256FromHex("cawcd") })
// Truncated hex hash
h := SumSHA256(randBytes(t, 128))
- assert.Panics(t, func() {
+ require.Panics(t, func() {
MustSHA256FromHex(hex.EncodeToString(h[:len(h)/2]))
})
// Valid hex hash
h2 := MustSHA256FromHex(hex.EncodeToString(h[:]))
- assert.Equal(t, h, h2)
+ require.Equal(t, h, h2)
}
-func TestMustSumSHA256(t *testing.T) {
- b := randBytes(t, 128)
- assert.Panics(t, func() { MustSumSHA256(b, 127) })
- assert.Panics(t, func() { MustSumSHA256(b, 129) })
- assert.NotPanics(t, func() { MustSumSHA256(b, 128) })
- h := MustSumSHA256(b, 128)
- assert.NotEqual(t, h, SHA256{})
- assert.Equal(t, h, freshSumSHA256(b))
+func TestSHA256FromBytes(t *testing.T) {
+ b := randBytes(t, 32)
+ h, err := SHA256FromBytes(b)
+ require.NoError(t, err)
+ require.True(t, bytes.Equal(b[:], h[:]))
+
+ b = randBytes(t, 31)
+ _, err = SHA256FromBytes(b)
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+
+ b = randBytes(t, 33)
+ _, err = SHA256FromBytes(b)
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+
+ _, err = SHA256FromBytes(nil)
+ require.Equal(t, errors.New("Invalid sha256 length"), err)
+}
+
+func TestMustSHA256FromBytes(t *testing.T) {
+ b := randBytes(t, 32)
+ h := MustSHA256FromBytes(b)
+ require.True(t, bytes.Equal(b[:], h[:]))
+
+ b = randBytes(t, 31)
+ require.Panics(t, func() {
+ MustSHA256FromBytes(b)
+ })
+
+ b = randBytes(t, 33)
+ require.Panics(t, func() {
+ MustSHA256FromBytes(b)
+ })
+
+ require.Panics(t, func() {
+ MustSHA256FromBytes(nil)
+ })
}
func TestDoubleSHA256(t *testing.T) {
b := randBytes(t, 128)
h := DoubleSHA256(b)
- assert.NotEqual(t, h, SHA256{})
- assert.NotEqual(t, h, freshSumSHA256(b))
+ require.NotEqual(t, h, SHA256{})
+ require.NotEqual(t, h, freshSumSHA256(t, b))
}
func TestAddSHA256(t *testing.T) {
@@ -192,10 +294,10 @@ func TestAddSHA256(t *testing.T) {
c := randBytes(t, 64)
i := SumSHA256(c)
add := AddSHA256(h, i)
- assert.NotEqual(t, add, SHA256{})
- assert.NotEqual(t, add, h)
- assert.NotEqual(t, add, i)
- assert.Equal(t, add, SumSHA256(append(h[:], i[:]...)))
+ require.NotEqual(t, add, SHA256{})
+ require.NotEqual(t, add, h)
+ require.NotEqual(t, add, i)
+ require.Equal(t, add, SumSHA256(append(h[:], i[:]...)))
}
func TestXorSHA256(t *testing.T) {
@@ -203,10 +305,10 @@ func TestXorSHA256(t *testing.T) {
c := randBytes(t, 128)
h := SumSHA256(b)
i := SumSHA256(c)
- assert.NotEqual(t, h.Xor(i), h)
- assert.NotEqual(t, h.Xor(i), i)
- assert.NotEqual(t, h.Xor(i), SHA256{})
- assert.Equal(t, h.Xor(i), i.Xor(h))
+ require.NotEqual(t, h.Xor(i), h)
+ require.NotEqual(t, h.Xor(i), i)
+ require.NotEqual(t, h.Xor(i), SHA256{})
+ require.Equal(t, h.Xor(i), i.Xor(h))
}
func TestSHA256Null(t *testing.T) {
@@ -238,34 +340,34 @@ func TestNextPowerOfTwo(t *testing.T) {
{65537, 131072},
}
for _, i := range inputs {
- assert.Equal(t, nextPowerOfTwo(i[0]), i[1])
+ require.Equal(t, nextPowerOfTwo(i[0]), i[1])
}
for i := uint64(2); i < 10000; i++ {
p := nextPowerOfTwo(i)
- assert.Equal(t, p%2, uint64(0))
- assert.True(t, p >= i)
+ require.Equal(t, p%2, uint64(0))
+ require.True(t, p >= i)
}
}
func TestMerkle(t *testing.T) {
h := SumSHA256(randBytes(t, 128))
// Single hash input returns hash
- assert.Equal(t, Merkle([]SHA256{h}), h)
+ require.Equal(t, Merkle([]SHA256{h}), h)
h2 := SumSHA256(randBytes(t, 128))
// 2 hashes should be AddSHA256 of them
- assert.Equal(t, Merkle([]SHA256{h, h2}), AddSHA256(h, h2))
+ require.Equal(t, Merkle([]SHA256{h, h2}), AddSHA256(h, h2))
// 3 hashes should be Add(Add())
h3 := SumSHA256(randBytes(t, 128))
out := AddSHA256(AddSHA256(h, h2), AddSHA256(h3, SHA256{}))
- assert.Equal(t, Merkle([]SHA256{h, h2, h3}), out)
+ require.Equal(t, Merkle([]SHA256{h, h2, h3}), out)
// 4 hashes should be Add(Add())
h4 := SumSHA256(randBytes(t, 128))
out = AddSHA256(AddSHA256(h, h2), AddSHA256(h3, h4))
- assert.Equal(t, Merkle([]SHA256{h, h2, h3, h4}), out)
+ require.Equal(t, Merkle([]SHA256{h, h2, h3, h4}), out)
// 5 hashes
h5 := SumSHA256(randBytes(t, 128))
out = AddSHA256(AddSHA256(h, h2), AddSHA256(h3, h4))
out = AddSHA256(out, AddSHA256(AddSHA256(h5, SHA256{}),
AddSHA256(SHA256{}, SHA256{})))
- assert.Equal(t, Merkle([]SHA256{h, h2, h3, h4, h5}), out)
+ require.Equal(t, Merkle([]SHA256{h, h2, h3, h4, h5}), out)
}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/pbkdf2/pbkdf2_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/pbkdf2/pbkdf2_test.go
new file mode 100644
index 0000000..1fc814f
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/pbkdf2/pbkdf2_test.go
@@ -0,0 +1,157 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pbkdf2
+
+import (
+ "bytes"
+ "crypto/sha1" //nolint:gosec
+ "crypto/sha256"
+ "hash"
+ "testing"
+)
+
+type testVector struct {
+ password string
+ salt string
+ iter int
+ output []byte
+}
+
+// Test vectors from RFC 6070, http://tools.ietf.org/html/rfc6070
+var sha1TestVectors = []testVector{
+ {
+ "password",
+ "salt",
+ 1,
+ []byte{
+ 0x0c, 0x60, 0xc8, 0x0f, 0x96, 0x1f, 0x0e, 0x71,
+ 0xf3, 0xa9, 0xb5, 0x24, 0xaf, 0x60, 0x12, 0x06,
+ 0x2f, 0xe0, 0x37, 0xa6,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 2,
+ []byte{
+ 0xea, 0x6c, 0x01, 0x4d, 0xc7, 0x2d, 0x6f, 0x8c,
+ 0xcd, 0x1e, 0xd9, 0x2a, 0xce, 0x1d, 0x41, 0xf0,
+ 0xd8, 0xde, 0x89, 0x57,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 4096,
+ []byte{
+ 0x4b, 0x00, 0x79, 0x01, 0xb7, 0x65, 0x48, 0x9a,
+ 0xbe, 0xad, 0x49, 0xd9, 0x26, 0xf7, 0x21, 0xd0,
+ 0x65, 0xa4, 0x29, 0xc1,
+ },
+ },
+ // // This one takes too long
+ // {
+ // "password",
+ // "salt",
+ // 16777216,
+ // []byte{
+ // 0xee, 0xfe, 0x3d, 0x61, 0xcd, 0x4d, 0xa4, 0xe4,
+ // 0xe9, 0x94, 0x5b, 0x3d, 0x6b, 0xa2, 0x15, 0x8c,
+ // 0x26, 0x34, 0xe9, 0x84,
+ // },
+ // },
+ {
+ "passwordPASSWORDpassword",
+ "saltSALTsaltSALTsaltSALTsaltSALTsalt",
+ 4096,
+ []byte{
+ 0x3d, 0x2e, 0xec, 0x4f, 0xe4, 0x1c, 0x84, 0x9b,
+ 0x80, 0xc8, 0xd8, 0x36, 0x62, 0xc0, 0xe4, 0x4a,
+ 0x8b, 0x29, 0x1a, 0x96, 0x4c, 0xf2, 0xf0, 0x70,
+ 0x38,
+ },
+ },
+ {
+ "pass\000word",
+ "sa\000lt",
+ 4096,
+ []byte{
+ 0x56, 0xfa, 0x6a, 0xa7, 0x55, 0x48, 0x09, 0x9d,
+ 0xcc, 0x37, 0xd7, 0xf0, 0x34, 0x25, 0xe0, 0xc3,
+ },
+ },
+}
+
+// Test vectors from
+// http://stackoverflow.com/questions/5130513/pbkdf2-hmac-sha2-test-vectors
+var sha256TestVectors = []testVector{
+ {
+ "password",
+ "salt",
+ 1,
+ []byte{
+ 0x12, 0x0f, 0xb6, 0xcf, 0xfc, 0xf8, 0xb3, 0x2c,
+ 0x43, 0xe7, 0x22, 0x52, 0x56, 0xc4, 0xf8, 0x37,
+ 0xa8, 0x65, 0x48, 0xc9,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 2,
+ []byte{
+ 0xae, 0x4d, 0x0c, 0x95, 0xaf, 0x6b, 0x46, 0xd3,
+ 0x2d, 0x0a, 0xdf, 0xf9, 0x28, 0xf0, 0x6d, 0xd0,
+ 0x2a, 0x30, 0x3f, 0x8e,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 4096,
+ []byte{
+ 0xc5, 0xe4, 0x78, 0xd5, 0x92, 0x88, 0xc8, 0x41,
+ 0xaa, 0x53, 0x0d, 0xb6, 0x84, 0x5c, 0x4c, 0x8d,
+ 0x96, 0x28, 0x93, 0xa0,
+ },
+ },
+ {
+ "passwordPASSWORDpassword",
+ "saltSALTsaltSALTsaltSALTsaltSALTsalt",
+ 4096,
+ []byte{
+ 0x34, 0x8c, 0x89, 0xdb, 0xcb, 0xd3, 0x2b, 0x2f,
+ 0x32, 0xd8, 0x14, 0xb8, 0x11, 0x6e, 0x84, 0xcf,
+ 0x2b, 0x17, 0x34, 0x7e, 0xbc, 0x18, 0x00, 0x18,
+ 0x1c,
+ },
+ },
+ {
+ "pass\000word",
+ "sa\000lt",
+ 4096,
+ []byte{
+ 0x89, 0xb6, 0x9d, 0x05, 0x16, 0xf8, 0x29, 0x89,
+ 0x3c, 0x69, 0x62, 0x26, 0x65, 0x0a, 0x86, 0x87,
+ },
+ },
+}
+
+func testHash(t *testing.T, h func() hash.Hash, hashName string, vectors []testVector) {
+ for i, v := range vectors {
+ o := Key([]byte(v.password), []byte(v.salt), v.iter, len(v.output), h)
+ if !bytes.Equal(o, v.output) {
+ t.Errorf("%s %d: expected %x, got %x", hashName, i, v.output, o)
+ }
+ }
+}
+
+func TestWithHMACSHA1(t *testing.T) {
+ testHash(t, sha1.New, "SHA1", sha1TestVectors)
+}
+
+func TestWithHMACSHA256(t *testing.T) {
+ testHash(t, sha256.New, "SHA256", sha256TestVectors)
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go
index d441d9d..87afb3f 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256_test.go
@@ -3,27 +3,21 @@ package secp256k1
import (
"bytes"
"encoding/hex"
- "fmt"
"log"
- "math/rand"
"testing"
)
-const TESTS = 1 //10000 // how many tests
-const SigSize = 65 //64+1
-
-func Test_Secp256_00(t *testing.T) {
-
- nonce := RandByte(32) //going to get bitcoins stolen!
+const TESTS = 10000 // number of tests to use for random tests
+func TestRandByte(t *testing.T) {
+ nonce := RandByte(32)
if len(nonce) != 32 {
t.Fatal()
}
-
}
-//test agreement for highest bit test
-func Test_BitTwiddle(t *testing.T) {
+func TestBitTwiddle(t *testing.T) {
+ // test agreement for highest bit test
var b byte
for i := 0; i < 512; i++ {
bool1 := ((b >> 7) == 1)
@@ -35,23 +29,22 @@ func Test_BitTwiddle(t *testing.T) {
}
}
-//tests for Malleability
-//highest bit of S must be 0; 32nd byte
-func CompactSigTest(sig []byte) {
+// assertSigCompact tests for signature malleability
+// highest bit of S must be 0; 32nd byte
+func assertSigCompact(sig []byte) {
b := int(sig[32])
if b < 0 {
log.Panic()
}
if ((b >> 7) == 1) != ((b & 0x80) == 0x80) {
- log.Panicf("b= %v b2= %v \n", b, b>>7)
+ log.Panicf("b=%v b2=%v", b, b>>7)
}
if (b & 0x80) == 0x80 {
- log.Panicf("b= %v b2= %v \n", b, b&0x80)
+ log.Panicf("b=%v b2=%v", b, b&0x80)
}
}
-//test pubkey/private generation
-func Test_Secp256_01(t *testing.T) {
+func TestGenerateKeyPairValidKeys(t *testing.T) {
pubkey, seckey := GenerateKeyPair()
if VerifySeckey(seckey) != 1 {
t.Fatal()
@@ -61,11 +54,19 @@ func Test_Secp256_01(t *testing.T) {
}
}
-// test compressed pubkey from private key
-func Test_PubkeyFromSeckey(t *testing.T) {
+func TestPubkeyFromSeckey(t *testing.T) {
+ // test compressed pubkey from private key
// http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html
- privkey, _ := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)
- desiredPubKey, _ := hex.DecodeString(`03fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef1`)
+ privkey, err := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)
+ if err != nil {
+ t.Fatal()
+ }
+
+ desiredPubKey, err := hex.DecodeString(`03fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef1`)
+ if err != nil {
+ t.Fatal()
+ }
+
if pubkey := PubkeyFromSeckey(privkey); pubkey == nil {
t.Fatal()
} else if !bytes.Equal(pubkey, desiredPubKey) {
@@ -73,11 +74,19 @@ func Test_PubkeyFromSeckey(t *testing.T) {
}
}
-// test uncompressed pubkey from private key
-func Test_UncompressedPubkeyFromSeckey(t *testing.T) {
+func TestUncompressedPubkeyFromSeckey(t *testing.T) {
+ // test uncompressed pubkey from private key
// http://www.righto.com/2014/02/bitcoins-hard-way-using-raw-bitcoin.html
- privkey, _ := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)
- desiredPubKey, _ := hex.DecodeString(`04fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef10fe85eb3ce193405c2dd8453b7aeb6c1752361efdbf4f52ea8bf8f304aab37ab`)
+ privkey, err := hex.DecodeString(`f19c523315891e6e15ae0608a35eec2e00ebd6d1984cf167f46336dabd9b2de4`)
+ if err != nil {
+ t.Fatal()
+ }
+
+ desiredPubKey, err := hex.DecodeString(`04fe43d0c2c3daab30f9472beb5b767be020b81c7cc940ed7a7e910f0c1d9feef10fe85eb3ce193405c2dd8453b7aeb6c1752361efdbf4f52ea8bf8f304aab37ab`)
+ if err != nil {
+ t.Fatal()
+ }
+
if pubkey := UncompressedPubkeyFromSeckey(privkey); pubkey == nil {
t.Fatal()
} else if !bytes.Equal(pubkey, desiredPubKey) {
@@ -85,15 +94,15 @@ func Test_UncompressedPubkeyFromSeckey(t *testing.T) {
}
}
-//returns random pubkey, seckey, hash and signature
-func RandX() ([]byte, []byte, []byte, []byte) {
+// returns random pubkey, seckey, hash and signature
+func randX() ([]byte, []byte, []byte, []byte) {
pubkey, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
return pubkey, seckey, msg, sig
}
-func Test_SignatureVerifyPubkey(t *testing.T) {
+func TestSignatureVerifyPubkey(t *testing.T) {
pubkey1, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
@@ -101,13 +110,13 @@ func Test_SignatureVerifyPubkey(t *testing.T) {
t.Fail()
}
pubkey2 := RecoverPubkey(msg, sig)
- if bytes.Equal(pubkey1, pubkey2) == false {
+ if !bytes.Equal(pubkey1, pubkey2) {
t.Fatal("Recovered pubkey does not match")
}
}
-func Test_verify_functions(t *testing.T) {
- pubkey, seckey, hash, sig := RandX()
+func TestVerifyFunctions(t *testing.T) {
+ pubkey, seckey, hash, sig := randX()
if VerifySeckey(seckey) == 0 {
t.Fail()
}
@@ -117,25 +126,14 @@ func Test_verify_functions(t *testing.T) {
if VerifySignature(hash, sig, pubkey) == 0 {
t.Fail()
}
- _ = sig
}
-func Test_SignatureVerifySecKey(t *testing.T) {
- pubkey, seckey := GenerateKeyPair()
- if VerifySeckey(seckey) == 0 {
- t.Fail()
- }
- if VerifyPubkey(pubkey) == 0 {
- t.Fail()
- }
-}
-
-//test size of messages
-func Test_Secp256_02s(t *testing.T) {
+func TestSignOutputSizes(t *testing.T) {
+ // test size of messages
pubkey, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
- CompactSigTest(sig)
+ assertSigCompact(sig)
if sig == nil {
t.Fatal("Signature nil")
}
@@ -150,11 +148,11 @@ func Test_Secp256_02s(t *testing.T) {
}
if int(sig[64]) > 4 {
t.Fail()
- } //should be 0 to 4
+	} // recovery ID; valid values are 0 to 3 (the check above is lenient and permits 4)
}
-//test signing message
-func Test_Secp256_02(t *testing.T) {
+func TestRecoverVerifySignature1(t *testing.T) {
+ // test signing message
pubkey1, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
@@ -166,7 +164,7 @@ func Test_Secp256_02(t *testing.T) {
if pubkey2 == nil {
t.Fatal("Recovered pubkey invalid")
}
- if bytes.Equal(pubkey1, pubkey2) == false {
+ if !bytes.Equal(pubkey1, pubkey2) {
t.Fatal("Recovered pubkey does not match")
}
@@ -176,8 +174,51 @@ func Test_Secp256_02(t *testing.T) {
}
}
-//test pubkey recovery
-func Test_Secp256_02a(t *testing.T) {
+func TestVerifySignatureFailure(t *testing.T) {
+ p, s := GenerateKeyPair()
+ msg := RandByte(32)
+ sig := Sign(msg, s)
+ if sig == nil {
+ t.Fatal("Signature nil")
+ }
+
+ p2, _ := GenerateKeyPair()
+
+ if bytes.Equal(p, p2) {
+ t.Fatal("GenerateKeyPair returned the same keys twice")
+ }
+
+ // Signature has different pubkey
+ ret := VerifySignature(msg, sig, p2)
+ if ret == 1 {
+ t.Fatal("Signature unexpectedly valid")
+ }
+
+ msg2 := RandByte(32)
+ sig2 := Sign(msg2, s)
+
+ // Signature is for a different message
+ ret = VerifySignature(msg, sig2, p)
+ if ret == 1 {
+ t.Fatal("Signature unexpectedly valid")
+ }
+
+ // Signature is for a different message
+ ret = VerifySignature(msg2, sig, p)
+ if ret == 1 {
+ t.Fatal("Signature unexpectedly valid")
+ }
+
+ // Signature is for a different message
+ msg3 := RandByte(32)
+ ret = VerifySignature(msg3, sig, p)
+ if ret == 1 {
+ t.Fatal("Signature unexpectedly valid")
+ }
+}
+
+func TestRecoverVerifySignature2(t *testing.T) {
+ // test pubkey recovery
pubkey1, seckey1 := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey1)
@@ -199,18 +240,18 @@ func Test_Secp256_02a(t *testing.T) {
t.Fatal()
}
}
- if bytes.Equal(pubkey1, pubkey2) == false {
+ if !bytes.Equal(pubkey1, pubkey2) {
t.Fatal()
}
}
-//test random messages for the same pub/private key
-func Test_Secp256_03(t *testing.T) {
+func TestRecoverPubkey1(t *testing.T) {
+ // test random messages for the same pub/private key
_, seckey := GenerateKeyPair()
for i := 0; i < TESTS; i++ {
msg := RandByte(32)
sig := Sign(msg, seckey)
- CompactSigTest(sig)
+ assertSigCompact(sig)
sig[len(sig)-1] %= 4
pubkey2 := RecoverPubkey(msg, sig)
@@ -220,13 +261,13 @@ func Test_Secp256_03(t *testing.T) {
}
}
-//test random messages for different pub/private keys
-func Test_Secp256_04(t *testing.T) {
+func TestRecoverPubkey2(t *testing.T) {
+ // test random messages for different pub/private keys
for i := 0; i < TESTS; i++ {
pubkey1, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
- CompactSigTest(sig)
+ assertSigCompact(sig)
if sig[len(sig)-1] >= 4 {
t.Fail()
@@ -235,17 +276,12 @@ func Test_Secp256_04(t *testing.T) {
if pubkey2 == nil {
t.Fail()
}
- if bytes.Equal(pubkey1, pubkey2) == false {
+ if !bytes.Equal(pubkey1, pubkey2) {
t.Fail()
}
}
}
-//test random signatures against fixed messages; should fail
-
-//crashes:
-// -SIPA look at this
-
func randSig() []byte {
sig := RandByte(65)
sig[32] &= 0x70
@@ -253,7 +289,7 @@ func randSig() []byte {
return sig
}
-func Test_Secp256_06a_alt0(t *testing.T) {
+func TestRecoverVerifySignature3(t *testing.T) {
pubkey1, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
@@ -268,7 +304,7 @@ func Test_Secp256_06a_alt0(t *testing.T) {
sig = randSig()
pubkey2 := RecoverPubkey(msg, sig)
- if bytes.Equal(pubkey1, pubkey2) == true {
+ if bytes.Equal(pubkey1, pubkey2) {
t.Fail()
}
@@ -282,9 +318,8 @@ func Test_Secp256_06a_alt0(t *testing.T) {
}
}
-//test random messages against valid signature: should fail
-
-func Test_Secp256_06b(t *testing.T) {
+func TestRecoverVerifySignature4(t *testing.T) {
+ // test random messages against valid signature: should fail
pubkey1, seckey := GenerateKeyPair()
msg := RandByte(32)
sig := Sign(msg, seckey)
@@ -293,7 +328,7 @@ func Test_Secp256_06b(t *testing.T) {
for i := 0; i < TESTS; i++ {
msg = RandByte(32)
pubkey2 := RecoverPubkey(msg, sig)
- if bytes.Equal(pubkey1, pubkey2) == true {
+ if bytes.Equal(pubkey1, pubkey2) {
t.Fail()
}
@@ -306,7 +341,7 @@ func Test_Secp256_06b(t *testing.T) {
}
}
if failCount != 0 {
- fmt.Printf("ERROR: Accepted signature for %v of %v random messages\n", failCount, TESTS)
+ t.Errorf("ERROR: Accepted signature for %v of %v random messages", failCount, TESTS)
}
}
@@ -314,192 +349,392 @@ func Test_Secp256_06b(t *testing.T) {
Deterministic Keypair Tests
*/
-func Test_Deterministic_Keypairs_00(t *testing.T) {
+func TestDeterministicKeypairs00(t *testing.T) {
for i := 0; i < 64; i++ {
seed := RandByte(64)
_, pub1, sec1 := DeterministicKeyPairIterator(seed)
pub2, sec2 := GenerateDeterministicKeyPair(seed)
- if bytes.Equal(pub1, pub2) == false {
+ if !bytes.Equal(pub1, pub2) {
t.Fail()
}
- if bytes.Equal(sec1, sec2) == false {
+ if !bytes.Equal(sec1, sec2) {
t.Fail()
}
}
}
-func Test_Deterministic_Keypairs_01(t *testing.T) {
+func TestDeterministicKeypairs01(t *testing.T) {
for i := 0; i < 64; i++ {
seed := RandByte(32)
_, pub1, sec1 := DeterministicKeyPairIterator(seed)
pub2, sec2 := GenerateDeterministicKeyPair(seed)
- if bytes.Equal(pub1, pub2) == false {
+ if !bytes.Equal(pub1, pub2) {
t.Fail()
}
- if bytes.Equal(sec1, sec2) == false {
+ if !bytes.Equal(sec1, sec2) {
t.Fail()
}
}
}
-func Test_Deterministic_Keypairs_02(t *testing.T) {
+func TestDeterministicKeypairs02(t *testing.T) {
for i := 0; i < 64; i++ {
seed := RandByte(32)
_, pub1, sec1 := DeterministicKeyPairIterator(seed)
pub2, sec2 := GenerateDeterministicKeyPair(seed)
- if bytes.Equal(pub1, pub2) == false {
+ if !bytes.Equal(pub1, pub2) {
t.Fail()
}
- if bytes.Equal(sec1, sec2) == false {
+ if !bytes.Equal(sec1, sec2) {
t.Fail()
}
}
}
-func Decode(str string) []byte {
- byt, err := hex.DecodeString(str)
+func MustDecodeHex(str string) []byte {
+ b, err := hex.DecodeString(str)
if err != nil {
- log.Panic()
- }
- return byt
-}
-
-func Test_Deterministic_Keypairs_03(t *testing.T) {
-
- //test vectors: seed, seckey
- var testArray = []string{
- "tQ93w5Aqcunm9SGUfnmF4fJv", "9b8c3e36adce64dedc80d6dfe51ff1742cc1d755bbad457ac01177c5a18a789f",
- "DC7qdQQtbWSSaekXnFmvQgse", "d2deaf4a9ff7a5111fe1d429d6976cbde78811fdd075371a2a4449bb0f4d8bf9",
- "X8EkuUZC7Td7PAXeS7Duc7vR", "cad79b6dcf7bd21891cbe20a51c57d59689ae6e3dc482cd6ec22898ac00cd86b",
- "tVqPYHHNVPRWyEed62v7f23u", "2a386e94e9ffaa409517cbed81b9b2d4e1c5fb4afe3cbd67ce8aba11af0b02fa",
- "kCy4R57HDfLqF3pVhBWxuMcg", "26a7c6d8809c476a56f7455209f58b5ff3f16435fcf208ff2931ece60067f305",
- "j8bjv86ZNjKqzafR6mtSUVCE", "ea5c0f8c9f091a70bf38327adb9b2428a9293e7a7a75119920d759ecfa03a995",
- "qShryAzVY8EtsuD3dsAc7qnG", "331206176509bcae31c881dc51e90a4e82ec33cd7208a5fb4171ed56602017fa",
- "5FGG7ZBa8wVMBJkmzpXj5ESX", "4ea2ad82e7730d30c0c21d01a328485a0cf5543e095139ba613929be7739b52c",
- "f46TZG4xJHXUGWx8ekbNqa9F", "dcddd403d3534c4ef5703cc07a771c107ed49b7e0643c6a2985a96149db26108",
- "XkZdQJ5LT96wshN8JBH8rvEt", "3e276219081f072dff5400ca29a9346421eaaf3c419ff1474ac1c81ad8a9d6e1",
- "GFDqXU4zYymhJJ9UGqRgS8ty", "95be4163085b571e725edeffa83fff8e7a7db3c1ccab19d0f3c6e105859b5e10",
- "tmwZksH2XyvuamnddYxyJ5Lp", "2666dd54e469df56c02e82dffb4d3ea067daafe72c54dc2b4f08c4fb3a7b7e42",
- "EuqZFsbAV5amTzkhgAMgjr7W", "40c325c01f2e4087fcc97fcdbea6c35c88a12259ebf1bce0b14a4d77f075abbf",
- "TW6j8rMffZfmhyDEt2JUCrLB", "e676e0685c5d1afd43ad823b83db5c6100135c35485146276ee0b0004bd6689e",
- "8rvkBnygfhWP8kjX9aXq68CY", "21450a646eed0d4aa50a1736e6c9bf99fff006a470aab813a2eff3ee4d460ae4",
- "phyRfPDuf9JMRFaWdGh7NXPX", "ca7bc04196c504d0e815e125f7f1e086c8ae8c10d5e9df984aeab4b41bf9e398",
- }
-
- for i := 0; i < len(testArray)/2; i++ {
- seed := []byte(testArray[2*i+0])
- sec1 := Decode(testArray[2*i+1])
-
- _, sec2 := GenerateDeterministicKeyPair(seed)
- if bytes.Equal(sec1, sec2) == false {
- t.Fail()
- }
- }
-}
-
-func Test_DeterministicWallets1(t *testing.T) {
-
- var testArray = []string{
- "90c56f5b8d78a46fb4cddf6fd9c6d88d6d2d7b0ec35917c7dac12c03b04e444e", "94dd1a9de9ffd57b5516b8a7f090da67f142f7d22356fa5d1b894ee4d4fba95b",
- "a3b08ccf8cbae4955c02f223be1f97d2bb41d92b7f0c516eb8467a17da1e6057", "82fba4cc2bc29eef122f116f45d01d82ff488d7ee713f8a95c162a64097239e0",
- "7048eb8fa93cec992b93dc8e93c5543be34aad05239d4c036cf9e587bbcf7654", "44c059496aac871ac168bb6889b9dd3decdb9e1fa082442a95fcbca982643425",
- "6d25375591bbfce7f601fc5eb40e4f3dde2e453dc4bf31595d8ec29e4370cd80", "d709ceb1a6fb906de506ea091c844ca37c65e52778b8d257d1dd3a942ab367fb",
- "7214b4c09f584c5ddff971d469df130b9a3c03e0277e92be159279de39462120", "5fe4986fa964773041e119d2b6549acb392b2277a72232af75cbfb62c357c1a7",
- "b13e78392d5446ae304b5fc9d45b85f26996982b2c0c86138afdac8d2ea9016e", "f784abc2e7f11ee84b4adb72ea4730a6aabe27b09604c8e2b792d8a1a31881ac",
- "9403bff4240a5999e17e0ab4a645d6942c3a7147c7834e092e461a4580249e6e", "d495174b8d3f875226b9b939121ec53f9383bd560d34aa5ca3ac6b257512adf4",
- "2665312a3e3628f4df0b9bc6334f530608a9bcdd4d1eef174ecda99f51a6db94", "1fdc9fbfc6991b9416b3a8385c9942e2db59009aeb2d8de349b73d9f1d389374",
- "6cb37532c80765b7c07698502a49d69351036f57a45a5143e33c57c236d841ca", "c87c85a6f482964db7f8c31720981925b1e357a9fdfcc585bc2164fdef1f54d0",
- "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c", "e2767d788d1c5620f3ef21d57f2d64559ab203c044f0a5f0730b21984e77019c",
- "66d1945ceb6ef8014b1b6703cb624f058913e722f15d03225be27cb9d8aabe4a", "3fcb80eb1d5b91c491408447ac4e221fcb2254c861adbb5a178337c2750b0846",
- "22c7623bf0e850538329e3e6d9a6f9b1235350824a3feaad2580b7a853550deb", "5577d4be25f1b44487140a626c8aeca2a77507a1fc4fd466dd3a82234abb6785",
- "a5eebe3469d68c8922a1a8b5a0a2b55293b7ff424240c16feb9f51727f734516", "c07275582d0681eb07c7b51f0bca0c48c056d571b7b83d84980ab40ac7d7d720",
- "479ec3b589b14aa7290b48c2e64072e4e5b15ce395d2072a5a18b0a2cf35f3fd", "f10e2b7675dfa557d9e3188469f12d3e953c2d46dce006cd177b6ae7f465cfc0",
- "63952334b731ec91d88c54614925576f82e3610d009657368fc866e7b1efbe73", "0bcbebb39d8fe1cb3eab952c6f701656c234e462b945e2f7d4be2c80b8f2d974",
- "256472ee754ef6af096340ab1e161f58e85fb0cc7ae6e6866b9359a1657fa6c1", "88ba6f6c66fc0ef01c938569c2dd1f05475cb56444f4582d06828e77d54ffbe6",
- }
-
- for i := 0; i < len(testArray)/2; i++ {
- seed := Decode(testArray[2*i+0]) //input
- seckey1 := Decode(testArray[2*i+1]) //target
- _, _, seckey2 := DeterministicKeyPairIterator(seed) //output
- if bytes.Equal(seckey1, seckey2) == false {
- t.Fail()
- }
- }
-}
-
-func Test_Secp256k1_Hash(t *testing.T) {
-
- var testArray = []string{
- "90c56f5b8d78a46fb4cddf6fd9c6d88d6d2d7b0ec35917c7dac12c03b04e444e", "a70c36286be722d8111e69e910ce4490005bbf9135b0ce8e7a59f84eee24b88b",
- "a3b08ccf8cbae4955c02f223be1f97d2bb41d92b7f0c516eb8467a17da1e6057", "e9db072fe5817325504174253a056be7b53b512f1e588f576f1f5a82cdcad302",
- "7048eb8fa93cec992b93dc8e93c5543be34aad05239d4c036cf9e587bbcf7654", "5e9133e83c4add2b0420d485e1dcda5c00e283c6509388ab8ceb583b0485c13b",
- "6d25375591bbfce7f601fc5eb40e4f3dde2e453dc4bf31595d8ec29e4370cd80", "8d5579cd702c06c40fb98e1d55121ea0d29f3a6c42f5582b902ac243f29b571a",
- "7214b4c09f584c5ddff971d469df130b9a3c03e0277e92be159279de39462120", "3a4e8c72921099a0e6a4e7f979df4c8bced63063097835cdfd5ee94548c9c41a",
- "b13e78392d5446ae304b5fc9d45b85f26996982b2c0c86138afdac8d2ea9016e", "462efa1bf4f639ffaedb170d6fb8ba363efcb1bdf0c5aef0c75afb59806b8053",
- "9403bff4240a5999e17e0ab4a645d6942c3a7147c7834e092e461a4580249e6e", "68dd702ea7c7352632876e9dc2333142fce857a542726e402bb480cad364f260",
- "2665312a3e3628f4df0b9bc6334f530608a9bcdd4d1eef174ecda99f51a6db94", "5db72c31d575c332e60f890c7e68d59bd3d0ac53a832e06e821d819476e1f010",
- "6cb37532c80765b7c07698502a49d69351036f57a45a5143e33c57c236d841ca", "0deb20ec503b4c678213979fd98018c56f24e9c1ec99af3cd84b43c161a9bb5c",
- "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c", "36f3ede761aa683813013ffa84e3738b870ce7605e0a958ed4ffb540cd3ea504",
- "66d1945ceb6ef8014b1b6703cb624f058913e722f15d03225be27cb9d8aabe4a", "6bcb4819a96508efa7e32ee52b0227ccf5fbe5539687aae931677b24f6d0bbbd",
- "22c7623bf0e850538329e3e6d9a6f9b1235350824a3feaad2580b7a853550deb", "8bb257a1a17fd2233935b33441d216551d5ff1553d02e4013e03f14962615c16",
- "a5eebe3469d68c8922a1a8b5a0a2b55293b7ff424240c16feb9f51727f734516", "d6b780983a63a3e4bcf643ee68b686421079c835a99eeba6962fe41bb355f8da",
- "479ec3b589b14aa7290b48c2e64072e4e5b15ce395d2072a5a18b0a2cf35f3fd", "39c5f108e7017e085fe90acfd719420740e57768ac14c94cb020d87e36d06752",
- "63952334b731ec91d88c54614925576f82e3610d009657368fc866e7b1efbe73", "79f654976732106c0e4a97ab3b6d16f343a05ebfcc2e1d679d69d396e6162a77",
- "256472ee754ef6af096340ab1e161f58e85fb0cc7ae6e6866b9359a1657fa6c1", "387883b86e2acc153aa334518cea48c0c481b573ccaacf17c575623c392f78b2",
- }
-
- for i := 0; i < len(testArray)/2; i++ {
- hash1 := Decode(testArray[2*i+0]) //input
- hash2 := Decode(testArray[2*i+1]) //target
- hash3 := Secp256k1Hash(hash1) //output
- if bytes.Equal(hash2, hash3) == false {
- t.Fail()
- }
- }
-}
-
-func Test_Secp256k1_Equal(t *testing.T) {
-
+ panic(err)
+ }
+ return b
+}
+
+func TestDeterministicKeypairs03(t *testing.T) {
+ cases := []struct {
+ seed string
+ seckey string
+ pubkey string
+ }{
+ {
+ seed: "tQ93w5Aqcunm9SGUfnmF4fJv",
+ seckey: "9b8c3e36adce64dedc80d6dfe51ff1742cc1d755bbad457ac01177c5a18a789f",
+ pubkey: "03996e65d79e957ce1eafb57453e55b55906e04c8de556e54961eb06a4836c06df",
+ },
+ {
+ seed: "DC7qdQQtbWSSaekXnFmvQgse",
+ seckey: "d2deaf4a9ff7a5111fe1d429d6976cbde78811fdd075371a2a4449bb0f4d8bf9",
+ pubkey: "0283a86efb1b8d82147c336c83d991f8124f0c4ca62c1019d6af1db46ae34594be",
+ },
+ {
+ seed: "X8EkuUZC7Td7PAXeS7Duc7vR",
+ seckey: "cad79b6dcf7bd21891cbe20a51c57d59689ae6e3dc482cd6ec22898ac00cd86b",
+ pubkey: "03f1fbd857b8a19de3ae35d616d41f179c0f3de94231e3caabf34eabf4674a1643",
+ },
+ {
+ seed: "tVqPYHHNVPRWyEed62v7f23u",
+ seckey: "2a386e94e9ffaa409517cbed81b9b2d4e1c5fb4afe3cbd67ce8aba11af0b02fa",
+ pubkey: "03ebde2c29e3beadab6f324ceb82a71c23655678e47d97f1d92159c3d7e4b59be4",
+ },
+ {
+ seed: "kCy4R57HDfLqF3pVhBWxuMcg",
+ seckey: "26a7c6d8809c476a56f7455209f58b5ff3f16435fcf208ff2931ece60067f305",
+ pubkey: "03b27bd3ae6b9034a4ffb2173381448c724f649fd0ec14ee0288758aa7812a7338",
+ },
+ {
+ seed: "j8bjv86ZNjKqzafR6mtSUVCE",
+ seckey: "ea5c0f8c9f091a70bf38327adb9b2428a9293e7a7a75119920d759ecfa03a995",
+ pubkey: "0236b5d52711f8a11da664c57da4378690751016ecf3089eed4ed1833c610046b6",
+ },
+ {
+ seed: "qShryAzVY8EtsuD3dsAc7qnG",
+ seckey: "331206176509bcae31c881dc51e90a4e82ec33cd7208a5fb4171ed56602017fa",
+ pubkey: "02da2aab09ec94e8a40d7381f72ff6585bf7d87e4a599d1408d2686ce5514692b1",
+ },
+ {
+ seed: "5FGG7ZBa8wVMBJkmzpXj5ESX",
+ seckey: "4ea2ad82e7730d30c0c21d01a328485a0cf5543e095139ba613929be7739b52c",
+ pubkey: "02b7d159de0d705c99e24d609b1591b1fac86d46c2c99e2ce6cc20b7402e32215c",
+ },
+ {
+ seed: "f46TZG4xJHXUGWx8ekbNqa9F",
+ seckey: "dcddd403d3534c4ef5703cc07a771c107ed49b7e0643c6a2985a96149db26108",
+ pubkey: "03e709fbdaf4f3b913b8c4ea887d1fea61ed356fcf0178ee7c2b556ce308cfc001",
+ },
+ {
+ seed: "XkZdQJ5LT96wshN8JBH8rvEt",
+ seckey: "3e276219081f072dff5400ca29a9346421eaaf3c419ff1474ac1c81ad8a9d6e1",
+ pubkey: "03a34782043386f068780cc82d0deffcea1703e4e4bbe67537a89bda0fbd3240e0",
+ },
+ {
+ seed: "GFDqXU4zYymhJJ9UGqRgS8ty",
+ seckey: "95be4163085b571e725edeffa83fff8e7a7db3c1ccab19d0f3c6e105859b5e10",
+ pubkey: "03ad4b4525c6031c2fa3c43722ca6dbde64b30d646b8914b0391096d8964e5d4da",
+ },
+ {
+ seed: "tmwZksH2XyvuamnddYxyJ5Lp",
+ seckey: "2666dd54e469df56c02e82dffb4d3ea067daafe72c54dc2b4f08c4fb3a7b7e42",
+ pubkey: "03f127118872ac5cb83b9167e561a28d82f4691d06297051dc71fb97d00b42aa20",
+ },
+ {
+ seed: "EuqZFsbAV5amTzkhgAMgjr7W",
+ seckey: "40c325c01f2e4087fcc97fcdbea6c35c88a12259ebf1bce0b14a4d77f075abbf",
+ pubkey: "03df10131650e63894e6c43427fc9ad0119a956ef1821c68f0e09e7d90411e6c39",
+ },
+ {
+ seed: "TW6j8rMffZfmhyDEt2JUCrLB",
+ seckey: "e676e0685c5d1afd43ad823b83db5c6100135c35485146276ee0b0004bd6689e",
+ pubkey: "03f9ead3620dfcfcf731d42b0752a2e1549b0190070eed686002e02f58da955731",
+ },
+ {
+ seed: "8rvkBnygfhWP8kjX9aXq68CY",
+ seckey: "21450a646eed0d4aa50a1736e6c9bf99fff006a470aab813a2eff3ee4d460ae4",
+ pubkey: "026ace328af3200b4abe13a29125545bd9d82cc32eed13b782371ef54fb6301d6c",
+ },
+ {
+ seed: "phyRfPDuf9JMRFaWdGh7NXPX",
+ seckey: "ca7bc04196c504d0e815e125f7f1e086c8ae8c10d5e9df984aeab4b41bf9e398",
+ pubkey: "03fc05f68ef56235b777168c45d46dfb8f665d12e4f92265305b2e66aafe000351",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.seed, func(t *testing.T) {
+ seed := []byte(tc.seed)
+ sec1 := MustDecodeHex(tc.seckey)
+ pub1 := MustDecodeHex(tc.pubkey)
+
+ pub2, sec2 := GenerateDeterministicKeyPair(seed)
+ if !bytes.Equal(sec1, sec2) {
+ t.Error("seckeys don't match")
+ }
+
+ if !bytes.Equal(pub1, pub2) {
+ t.Errorf("pubkeys don't match: %s != %s", tc.pubkey, hex.EncodeToString(pub2))
+ }
+ })
+ }
+}
+
+func TestDeterministicKeyPairIterator1(t *testing.T) {
+ cases := []struct {
+ seed string
+ seckey string
+ pubkey string
+ nextSeed string
+ }{
+ {
+ seed: "90c56f5b8d78a46fb4cddf6fd9c6d88d6d2d7b0ec35917c7dac12c03b04e444e",
+ seckey: "94dd1a9de9ffd57b5516b8a7f090da67f142f7d22356fa5d1b894ee4d4fba95b",
+ pubkey: "03eb71274ba23438f4ce6ac125e20bb78cd8123dc9483b5f34ace67cb6972e4ca8",
+ nextSeed: "a70c36286be722d8111e69e910ce4490005bbf9135b0ce8e7a59f84eee24b88b",
+ },
+ {
+ seed: "a3b08ccf8cbae4955c02f223be1f97d2bb41d92b7f0c516eb8467a17da1e6057",
+ seckey: "82fba4cc2bc29eef122f116f45d01d82ff488d7ee713f8a95c162a64097239e0",
+ pubkey: "02be1c44351c2e4295e4e9257667b164e2a0e471ecf499084357c13e1b5119b4c2",
+ nextSeed: "e9db072fe5817325504174253a056be7b53b512f1e588f576f1f5a82cdcad302",
+ },
+ {
+ seed: "7048eb8fa93cec992b93dc8e93c5543be34aad05239d4c036cf9e587bbcf7654",
+ seckey: "44c059496aac871ac168bb6889b9dd3decdb9e1fa082442a95fcbca982643425",
+ pubkey: "028868f984547f2febcdd9087a1cc3528929598b1afc9feec0fa62233052bff401",
+ nextSeed: "5e9133e83c4add2b0420d485e1dcda5c00e283c6509388ab8ceb583b0485c13b",
+ },
+ {
+ seed: "6d25375591bbfce7f601fc5eb40e4f3dde2e453dc4bf31595d8ec29e4370cd80",
+ seckey: "d709ceb1a6fb906de506ea091c844ca37c65e52778b8d257d1dd3a942ab367fb",
+ pubkey: "035f0b2cd081f6dd45178d87be62c88b020599292cf77834d8a4dab7a7aad6b1be",
+ nextSeed: "8d5579cd702c06c40fb98e1d55121ea0d29f3a6c42f5582b902ac243f29b571a",
+ },
+ {
+ seed: "7214b4c09f584c5ddff971d469df130b9a3c03e0277e92be159279de39462120",
+ seckey: "5fe4986fa964773041e119d2b6549acb392b2277a72232af75cbfb62c357c1a7",
+ pubkey: "032e039b5885d2d6001b2b5eb4b0af473befa04d2d9fbc4c12ef78f30fe186e487",
+ nextSeed: "3a4e8c72921099a0e6a4e7f979df4c8bced63063097835cdfd5ee94548c9c41a",
+ },
+ {
+ seed: "b13e78392d5446ae304b5fc9d45b85f26996982b2c0c86138afdac8d2ea9016e",
+ seckey: "f784abc2e7f11ee84b4adb72ea4730a6aabe27b09604c8e2b792d8a1a31881ac",
+ pubkey: "02f32b1960c1e61ccc58bb458b8e6fc74a2c37fcb1deb0708251b35e55ba11795e",
+ nextSeed: "462efa1bf4f639ffaedb170d6fb8ba363efcb1bdf0c5aef0c75afb59806b8053",
+ },
+ {
+ seed: "9403bff4240a5999e17e0ab4a645d6942c3a7147c7834e092e461a4580249e6e",
+ seckey: "d495174b8d3f875226b9b939121ec53f9383bd560d34aa5ca3ac6b257512adf4",
+ pubkey: "03f60cefd9bcc6f38487ae082d475c86ee086f0dfed25ff8758c1a9b06862dd0b8",
+ nextSeed: "68dd702ea7c7352632876e9dc2333142fce857a542726e402bb480cad364f260",
+ },
+ {
+ seed: "2665312a3e3628f4df0b9bc6334f530608a9bcdd4d1eef174ecda99f51a6db94",
+ seckey: "1fdc9fbfc6991b9416b3a8385c9942e2db59009aeb2d8de349b73d9f1d389374",
+ pubkey: "029a3b04c75549c8a509fb42a2fa4e8d8361bbe543ee93ccecea90411924f5ab5b",
+ nextSeed: "5db72c31d575c332e60f890c7e68d59bd3d0ac53a832e06e821d819476e1f010",
+ },
+ {
+ seed: "6cb37532c80765b7c07698502a49d69351036f57a45a5143e33c57c236d841ca",
+ seckey: "c87c85a6f482964db7f8c31720981925b1e357a9fdfcc585bc2164fdef1f54d0",
+ pubkey: "02b0f062bdf46066a9a7adb9337a6741ffe95ec26c5652d178dfff88ad302c962d",
+ nextSeed: "0deb20ec503b4c678213979fd98018c56f24e9c1ec99af3cd84b43c161a9bb5c",
+ },
+ {
+ seed: "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c",
+ seckey: "e2767d788d1c5620f3ef21d57f2d64559ab203c044f0a5f0730b21984e77019c",
+ pubkey: "03251fa5b85a9ada12787234e0ceb3dcc5bd58a49c15ac0749a4238f3bca6d9a1d",
+ nextSeed: "36f3ede761aa683813013ffa84e3738b870ce7605e0a958ed4ffb540cd3ea504",
+ },
+ {
+ seed: "66d1945ceb6ef8014b1b6703cb624f058913e722f15d03225be27cb9d8aabe4a",
+ seckey: "3fcb80eb1d5b91c491408447ac4e221fcb2254c861adbb5a178337c2750b0846",
+ pubkey: "03975043476a76b72b093d684b8a0979d8b246c2f99f16f95760d6d3490c2e37a1",
+ nextSeed: "6bcb4819a96508efa7e32ee52b0227ccf5fbe5539687aae931677b24f6d0bbbd",
+ },
+ {
+ seed: "22c7623bf0e850538329e3e6d9a6f9b1235350824a3feaad2580b7a853550deb",
+ seckey: "5577d4be25f1b44487140a626c8aeca2a77507a1fc4fd466dd3a82234abb6785",
+ pubkey: "0262e2054c89ad173f741e413d12f511a2cf98783c43f18f8dbb6274bdd584a3dc",
+ nextSeed: "8bb257a1a17fd2233935b33441d216551d5ff1553d02e4013e03f14962615c16",
+ },
+ {
+ seed: "a5eebe3469d68c8922a1a8b5a0a2b55293b7ff424240c16feb9f51727f734516",
+ seckey: "c07275582d0681eb07c7b51f0bca0c48c056d571b7b83d84980ab40ac7d7d720",
+ pubkey: "03d80474b8e6002793374a99d884605cf022d216573459b7deb19b6ccb110d286a",
+ nextSeed: "d6b780983a63a3e4bcf643ee68b686421079c835a99eeba6962fe41bb355f8da",
+ },
+ {
+ seed: "479ec3b589b14aa7290b48c2e64072e4e5b15ce395d2072a5a18b0a2cf35f3fd",
+ seckey: "f10e2b7675dfa557d9e3188469f12d3e953c2d46dce006cd177b6ae7f465cfc0",
+ pubkey: "0219d5b487ebdf719a994dcde094072e0227fc23e4cdbc4cce3b9d3e4a4ffe0668",
+ nextSeed: "39c5f108e7017e085fe90acfd719420740e57768ac14c94cb020d87e36d06752",
+ },
+ {
+ seed: "63952334b731ec91d88c54614925576f82e3610d009657368fc866e7b1efbe73",
+ seckey: "0bcbebb39d8fe1cb3eab952c6f701656c234e462b945e2f7d4be2c80b8f2d974",
+ pubkey: "02b58d9eb9595c24438a6ae4a4be4a408c0cd7a3017c3780cba253171cc9e62627",
+ nextSeed: "79f654976732106c0e4a97ab3b6d16f343a05ebfcc2e1d679d69d396e6162a77",
+ },
+ {
+ seed: "256472ee754ef6af096340ab1e161f58e85fb0cc7ae6e6866b9359a1657fa6c1",
+ seckey: "88ba6f6c66fc0ef01c938569c2dd1f05475cb56444f4582d06828e77d54ffbe6",
+ pubkey: "02dac6b246a06ac7c38a63f4f10b1344a8cd6f920a8fd74523bd2f5d4a9a3055b2",
+ nextSeed: "387883b86e2acc153aa334518cea48c0c481b573ccaacf17c575623c392f78b2",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.seed, func(t *testing.T) {
+ seed := MustDecodeHex(tc.seed)
+ seckey1 := MustDecodeHex(tc.seckey)
+ pubkey1 := MustDecodeHex(tc.pubkey)
+ nextSeed1 := MustDecodeHex(tc.nextSeed)
+ nextSeed2, pubkey2, seckey2 := DeterministicKeyPairIterator(seed)
+ if !bytes.Equal(seckey1, seckey2) {
+ t.Error("seckeys don't match")
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Errorf("pubkeys don't match: %s != %s", tc.pubkey, hex.EncodeToString(pubkey2))
+ }
+ if !bytes.Equal(nextSeed1, nextSeed2) {
+ t.Errorf("nextSeeds don't match: %s != %s", tc.nextSeed, hex.EncodeToString(nextSeed2))
+ }
+ })
+ }
+}
+
+func TestSecp256k1Hash(t *testing.T) {
+ cases := []struct {
+ seed string
+ hash string
+ }{
+ {
+ seed: "90c56f5b8d78a46fb4cddf6fd9c6d88d6d2d7b0ec35917c7dac12c03b04e444e",
+ hash: "a70c36286be722d8111e69e910ce4490005bbf9135b0ce8e7a59f84eee24b88b",
+ },
+ {
+ seed: "a3b08ccf8cbae4955c02f223be1f97d2bb41d92b7f0c516eb8467a17da1e6057",
+ hash: "e9db072fe5817325504174253a056be7b53b512f1e588f576f1f5a82cdcad302",
+ },
+ {
+ seed: "7048eb8fa93cec992b93dc8e93c5543be34aad05239d4c036cf9e587bbcf7654",
+ hash: "5e9133e83c4add2b0420d485e1dcda5c00e283c6509388ab8ceb583b0485c13b",
+ },
+ {
+ seed: "6d25375591bbfce7f601fc5eb40e4f3dde2e453dc4bf31595d8ec29e4370cd80",
+ hash: "8d5579cd702c06c40fb98e1d55121ea0d29f3a6c42f5582b902ac243f29b571a",
+ },
+ {
+ seed: "7214b4c09f584c5ddff971d469df130b9a3c03e0277e92be159279de39462120",
+ hash: "3a4e8c72921099a0e6a4e7f979df4c8bced63063097835cdfd5ee94548c9c41a",
+ },
+ {
+ seed: "b13e78392d5446ae304b5fc9d45b85f26996982b2c0c86138afdac8d2ea9016e",
+ hash: "462efa1bf4f639ffaedb170d6fb8ba363efcb1bdf0c5aef0c75afb59806b8053",
+ },
+ {
+ seed: "9403bff4240a5999e17e0ab4a645d6942c3a7147c7834e092e461a4580249e6e",
+ hash: "68dd702ea7c7352632876e9dc2333142fce857a542726e402bb480cad364f260",
+ },
+ {
+ seed: "2665312a3e3628f4df0b9bc6334f530608a9bcdd4d1eef174ecda99f51a6db94",
+ hash: "5db72c31d575c332e60f890c7e68d59bd3d0ac53a832e06e821d819476e1f010",
+ },
+ {
+ seed: "6cb37532c80765b7c07698502a49d69351036f57a45a5143e33c57c236d841ca",
+ hash: "0deb20ec503b4c678213979fd98018c56f24e9c1ec99af3cd84b43c161a9bb5c",
+ },
+ {
+ seed: "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c",
+ hash: "36f3ede761aa683813013ffa84e3738b870ce7605e0a958ed4ffb540cd3ea504",
+ },
+ {
+ seed: "66d1945ceb6ef8014b1b6703cb624f058913e722f15d03225be27cb9d8aabe4a",
+ hash: "6bcb4819a96508efa7e32ee52b0227ccf5fbe5539687aae931677b24f6d0bbbd",
+ },
+ {
+ seed: "22c7623bf0e850538329e3e6d9a6f9b1235350824a3feaad2580b7a853550deb",
+ hash: "8bb257a1a17fd2233935b33441d216551d5ff1553d02e4013e03f14962615c16",
+ },
+ {
+ seed: "a5eebe3469d68c8922a1a8b5a0a2b55293b7ff424240c16feb9f51727f734516",
+ hash: "d6b780983a63a3e4bcf643ee68b686421079c835a99eeba6962fe41bb355f8da",
+ },
+ {
+ seed: "479ec3b589b14aa7290b48c2e64072e4e5b15ce395d2072a5a18b0a2cf35f3fd",
+ hash: "39c5f108e7017e085fe90acfd719420740e57768ac14c94cb020d87e36d06752",
+ },
+ {
+ seed: "63952334b731ec91d88c54614925576f82e3610d009657368fc866e7b1efbe73",
+ hash: "79f654976732106c0e4a97ab3b6d16f343a05ebfcc2e1d679d69d396e6162a77",
+ },
+ {
+ seed: "256472ee754ef6af096340ab1e161f58e85fb0cc7ae6e6866b9359a1657fa6c1",
+ hash: "387883b86e2acc153aa334518cea48c0c481b573ccaacf17c575623c392f78b2",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.seed, func(t *testing.T) {
+ seed := MustDecodeHex(tc.seed)
+ if len(seed) != 32 {
+ t.Fatal("expected seed to be 32 bytes")
+ }
+ hash := Secp256k1Hash(seed)
+ if len(hash) != 32 {
+ t.Fatal("expected hash to be 32 bytes")
+ }
+ if !bytes.Equal(MustDecodeHex(tc.hash), hash) {
+ t.Fatal("secp256k1Hash does not match")
+ }
+ })
+ }
+}
+
+func TestSecp256k1Equal(t *testing.T) {
for i := 0; i < 64; i++ {
seed := RandByte(128)
hash1 := Secp256k1Hash(seed)
hash2, _, _ := DeterministicKeyPairIterator(seed)
- if bytes.Equal(hash1, hash2) == false {
+ if !bytes.Equal(hash1, hash2) {
t.Fail()
}
}
}
-func Test_DeterministicWalletGeneration(t *testing.T) {
- in := "8654a32fa120bfdb7ca02c487469070eba4b5a81b03763a2185fdf5afd756f3c"
- secOut := "10ba0325f1b8633ca463542950b5cd5f97753a9829ba23477c584e7aee9cfbd5"
- pubOut := "0249964ac7e3fe1b2c182a2f10abe031784e374cc0c665a63bc76cc009a05bc7c6"
-
- var seed = []byte(in)
- var pubkey []byte
- var seckey []byte
-
- for i := 0; i < 1024; i++ {
- seed, pubkey, seckey = DeterministicKeyPairIterator(seed)
- }
-
- if bytes.Equal(seckey, Decode(secOut)) == false {
- t.Fail()
- }
-
- if bytes.Equal(pubkey, Decode(pubOut)) == false {
- t.Fail()
- }
-}
-
-func Test_ECDH(t *testing.T) {
-
+func TestECDH(t *testing.T) {
pubkey1, seckey1 := GenerateKeyPair()
pubkey2, seckey2 := GenerateKeyPair()
@@ -514,14 +749,12 @@ func Test_ECDH(t *testing.T) {
t.Fail()
}
- if bytes.Equal(puba, pubb) == false {
+ if !bytes.Equal(puba, pubb) {
t.Fail()
}
-
}
-func Test_ECDH2(t *testing.T) {
-
+func TestECDH2(t *testing.T) {
for i := 0; i < 16*1024; i++ {
pubkey1, seckey1 := GenerateKeyPair()
@@ -538,37 +771,18 @@ func Test_ECDH2(t *testing.T) {
t.Fail()
}
- if bytes.Equal(puba, pubb) == false {
+ if !bytes.Equal(puba, pubb) {
t.Fail()
}
}
}
-/*
-seed = ee78b2fb5bef47aaab1abf54106b3b022ed3d68fdd24b5cfdd6e639e1c7baa6f
-seckey = 929c5f23a17115199e61b2c4c38fea06f763270a0d1189fbc6a46ddac05081fa
-pubkey1 = 028a4d9f32e7bd25befd0afa9e73755f35ae2f7012dfc7c000252f2afba2589af2
-pubkey2 = 028a4d9f32e7bd25befd0afa9e73755f35ae2f7012dfc80000252f2afba2589af2
-key_wif = L28hjib16NuBT4L1gK4DgzKjjxaCDggeZpXFy93MdZVz9fTZKwiE
-btc_addr1 = 14mvZw1wC8nKtycrTHu6NRTfWHuNVCpRgL
-btc_addr2 = 1HuwS7qARGMgNB7zao1FPmqiiZ92tsJGpX
-deterministic pubkeys do not match
-seed = 0e86692d755fd39a51acf6c935bdf425a6aad03a7914867e3f6db27371c966b4
-seckey = c9d016b26102fb309a73e644f6be308614a1b8f6f46f902c906ffaf0993ee63c
-pubkey1 = 03e86d62256dd05c2852c05a6b11d423f278288abeab490000b93d387de45a2f73
-pubkey2 = 03e86d62256dd05c2852c05a6b11d423f278288abeab494000b93d387de45a2f73
-key_wif = L3z1TTmgddKUm2Em22zKwLXGZ7jfwXLN5GxebpgH5iohaRJSm98D
-btc_addr1 = 1CcrzXvK34Cf4jzTko5uhCwbsC6e6K4rHw
-btc_addr2 = 1GtBH7dcZnh69Anqe8sHXKSJ9Dk4jXGHyp
-*/
-
-func Test_Abnormal_Keys(t *testing.T) {
-
- for i := 0; i < 32*1024; i++ {
-
+func TestDeterministicKeyPairIteratorStepRandom(t *testing.T) {
+ n := 32 * 1024
+ for i := 0; i < n; i++ {
seed := RandByte(32)
- pubkey1, seckey1 := generateDeterministicKeyPair(seed)
+ pubkey1, seckey1 := deterministicKeyPairIteratorStep(seed)
if seckey1 == nil {
t.Fail()
@@ -581,27 +795,70 @@ func Test_Abnormal_Keys(t *testing.T) {
if VerifyPubkey(pubkey1) != 1 {
seedHex := hex.EncodeToString(seed)
seckeyHex := hex.EncodeToString(seckey1)
- log.Printf("seed= %s", seedHex)
- log.Printf("seckey= %s", seckeyHex)
+ t.Logf("seed=%s", seedHex)
+ t.Logf("seckey=%s", seckeyHex)
t.Errorf("GenerateKeyPair, generates key that fails validation, run=%d", i)
}
}
}
-//problem seckeys
-var _testSeckey = []string{
- "08efb79385c9a8b0d1c6f5f6511be0c6f6c2902963d874a3a4bacc18802528d3",
- "78298d9ecdc0640c9ae6883201a53f4518055442642024d23c45858f45d0c3e6",
- "04e04fe65bfa6ded50a12769a3bd83d7351b2dbff08c9bac14662b23a3294b9e",
- "2f5141f1b75747996c5de77c911dae062d16ae48799052c04ead20ccd5afa113",
-}
-
-//test known bad keys
-func Test_Abnormal_Keys2(t *testing.T) {
-
- for i := 0; i < len(_testSeckey); i++ {
+// problematic seckeys
+// See: https://github.com/piotrnar/gocoin/issues/15
+// For additional information on the origin of these test vectors
+var abnormalSecKeys = []struct {
+ sec string
+ pub string
+ ecdh []string
+}{
+ {
+ sec: "08efb79385c9a8b0d1c6f5f6511be0c6f6c2902963d874a3a4bacc18802528d3",
+ pub: "03c74332d6094b1f603d4902fc6b1aa09fb3ef81f3015a4000cc0077ff70543c16",
+ ecdh: []string{
+ "",
+ "02e72655a3adf8308a078ee6fe948cf6baf95ef626b1e1fe6e434c737c7c2fef4e",
+ "03222fe59be5a69c38364dd313bd077b8b1c2216804a4a727e0078b3c77778bc45",
+ "021096aa98231eaa949542be029a1f3a93815e05e243c69e73d7449d719ff5d76d",
+ },
+ },
+ {
+ sec: "78298d9ecdc0640c9ae6883201a53f4518055442642024d23c45858f45d0c3e6",
+ pub: "02fa3e6e0b1fb76e26dffe7b1e01fd02677fedfed23a59000092c706b04214bee3",
+ ecdh: []string{
+ "02e72655a3adf8308a078ee6fe948cf6baf95ef626b1e1fe6e434c737c7c2fef4e",
+ "",
+ "025617125b44ded369deed72f833535d56a3ed035afc44ff64fb7c65986f6ea2a5",
+ "03849b3f906180cf27c161045e9da551a44476b0d4f7f29d668ba17569953d0a11",
+ },
+ },
+ {
+ sec: "04e04fe65bfa6ded50a12769a3bd83d7351b2dbff08c9bac14662b23a3294b9e",
+ pub: "034f25c9400dd0f87a9c420b35b5a157d21caa086ef8fa00015bc3c8ab73a1cc4c",
+ ecdh: []string{
+ "03222fe59be5a69c38364dd313bd077b8b1c2216804a4a727e0078b3c77778bc45",
+ "025617125b44ded369deed72f833535d56a3ed035afc44ff64fb7c65986f6ea2a5",
+ "",
+ "03fd41f8d279e2df640f17aef31c258a0a9aa6ddcaf4c4bc80f71dccff576b630c",
+ },
+ },
+ {
+ sec: "2f5141f1b75747996c5de77c911dae062d16ae48799052c04ead20ccd5afa113",
+ pub: "03fe58baefc491a9dcf0939ab6252f81f6d9515105bd89c000bb7f2a694e8a8b72",
+ ecdh: []string{
+ "021096aa98231eaa949542be029a1f3a93815e05e243c69e73d7449d719ff5d76d",
+ "03849b3f906180cf27c161045e9da551a44476b0d4f7f29d668ba17569953d0a11",
+ "03fd41f8d279e2df640f17aef31c258a0a9aa6ddcaf4c4bc80f71dccff576b630c",
+ "",
+ },
+ },
+}
+
+func TestAbnormalKeys2(t *testing.T) {
+ for _, tc := range abnormalSecKeys {
+ seckey1, err := hex.DecodeString(tc.sec)
+ if err != nil {
+ t.Error(err)
+ }
- seckey1, _ := hex.DecodeString(_testSeckey[i])
pubkey1 := PubkeyFromSeckey(seckey1)
if pubkey1 == nil {
t.Fail()
@@ -616,50 +873,121 @@ func Test_Abnormal_Keys2(t *testing.T) {
}
if VerifyPubkey(pubkey1) != 1 {
- t.Errorf("generates key that fails validation")
+ t.Error("generates key that fails validation")
}
- }
-}
-func _pairGen(seckey []byte) []byte {
- return nil
+ hpubkey1 := hex.EncodeToString(pubkey1)
+ if hpubkey1 != tc.pub {
+ t.Errorf("pubkey does not match %s != %s", hpubkey1, tc.pub)
+ }
+ }
}
-//ECDH test
-func Test_Abnormal_Keys3(t *testing.T) {
-
- for i := 0; i < len(_testSeckey); i++ {
+func TestAbnormalKeys3(t *testing.T) {
+ // ECDH test
+ for i, tc := range abnormalSecKeys {
+ seckey1, err := hex.DecodeString(tc.sec)
+ if err != nil {
+ t.Error(err)
+ }
- seckey1, _ := hex.DecodeString(_testSeckey[i])
pubkey1 := PubkeyFromSeckey(seckey1)
- seckey2, _ := hex.DecodeString(_testSeckey[rand.Int()%len(_testSeckey)])
- pubkey2 := PubkeyFromSeckey(seckey2)
-
if pubkey1 == nil {
- t.Errorf("pubkey1 nil")
- }
-
- if pubkey2 == nil {
- t.Errorf("pubkey2 nil")
- }
- //pubkey1, seckey1 := GenerateKeyPair()
- //pubkey2, seckey2 := GenerateKeyPair()
-
- puba := ECDH(pubkey1, seckey2)
- pubb := ECDH(pubkey2, seckey1)
-
- if puba == nil {
- t.Fail()
- }
-
- if pubb == nil {
- t.Fail()
- }
-
- if bytes.Equal(puba, pubb) == false {
- t.Errorf("recovered do not match")
- }
+ t.Error("pubkey1 nil")
+ }
+
+ if hex.EncodeToString(pubkey1) != tc.pub {
+ t.Error("pubkey1 does not match")
+ }
+
+ for j, tc2 := range abnormalSecKeys {
+ if i == j {
+ continue
+ }
+
+ seckey2, err := hex.DecodeString(tc2.sec)
+ if err != nil {
+ t.Error(err)
+ }
+ pubkey2 := PubkeyFromSeckey(seckey2)
+
+ if pubkey2 == nil {
+ t.Error("pubkey2 nil")
+ }
+
+ if hex.EncodeToString(pubkey2) != tc2.pub {
+ t.Error("pubkey2 does not match")
+ }
+
+ puba := ECDH(pubkey1, seckey2)
+ pubb := ECDH(pubkey2, seckey1)
+
+ if puba == nil {
+ t.Fail()
+ }
+
+ if pubb == nil {
+ t.Fail()
+ }
+
+ if !bytes.Equal(puba, pubb) {
+ t.Error("recovered ecdh keys do not match")
+ }
+
+ hpuba := hex.EncodeToString(puba)
+ if hpuba != tc.ecdh[j] {
+ t.Errorf("ecdh does not match %d,%d %s != %s", i, j, hpuba, tc.ecdh[j])
+ }
+ }
+
+ }
+}
+
+func TestDeterministicKeyPairIterator2(t *testing.T) {
+ cases := []struct {
+ seed string
+ sec string
+ pub string
+ n int
+ }{
+ {
+ seed: "67c53b28b8c7b06be53b490c28c0a3b77724b5c31c4bf12b71cd44c6bb4586f3",
+ sec: "68c751a58f48d656e4d3ec31f6c1016e6e36583ac2f63129f576b29e764469b5",
+ pub: "02c32556c48bfe944e4b8f6ecb6c884112c71a468247d338cbbdc9c561ab7c6d3d",
+ n: 1,
+ },
+ {
+ seed: "38363534613332666131323062666462376361303263343837343639303730656261346235613831623033373633613231383566646635616664373536663363",
+ sec: "10ba0325f1b8633ca463542950b5cd5f97753a9829ba23477c584e7aee9cfbd5",
+ pub: "0249964ac7e3fe1b2c182a2f10abe031784e374cc0c665a63bc76cc009a05bc7c6",
+ n: 1024,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.seed, func(t *testing.T) {
+ seed, err := hex.DecodeString(tc.seed)
+ if err != nil {
+ t.Fatalf("hex.DecodeString(%s) failed: %v", tc.seed, err)
+ return
+ }
+
+ var pub []byte
+ var sec []byte
+ for i := 0; i < tc.n; i++ {
+ seed, pub, sec = DeterministicKeyPairIterator(seed)
+ }
+
+ hsec := hex.EncodeToString(sec)
+ if hsec != tc.sec {
+ t.Fatalf("secret key does not match %s != %s", hsec, tc.sec)
+ }
+
+ hpub := hex.EncodeToString(pub)
+ if hpub != tc.pub {
+ t.Fatalf("public key does not match %s != %s", hpub, tc.pub)
+ }
+ })
}
-
}
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go
index 109694a..bef40af 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/ec_test.go
@@ -123,8 +123,8 @@ func TestECmult(t *testing.T) {
pubkeyj.ECmult(&pr, &u2, &u1)
if !pr.Equals(&expres) {
t.Error("ECmult failed")
- pr.Print("got")
- expres.Print("exp")
+ t.Logf("Got: %s", pr)
+ t.Logf("Exp: %s", expres)
}
}
@@ -204,9 +204,8 @@ func TestPrecompileGej(t *testing.T) {
func TestMultGen(t *testing.T) {
var nonce Number
var ex, ey, ez Field
- var r XYZ
nonce.SetHex("9E3CD9AB0F32911BFDE39AD155F527192CE5ED1F51447D63C4F154C118DA598E")
- ECmultGen(&r, &nonce)
+ r := ECmultGen(nonce)
ex.SetHex("02D1BF36D37ACD68E4DD00DB3A707FD176A37E42F81AEF9386924032D3428FF0")
ey.SetHex("FD52E285D33EC835230EA69F89D9C38673BD5B995716A4063C893AF02F938454")
ez.SetHex("4C6ACE7C8C062A1E046F66FD8E3981DC4E8E844ED856B5415C62047129268C1B")
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go
index 24f3b5a..0a97864 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/field_test.go
@@ -18,7 +18,10 @@ func TestFeInv(t *testing.T) {
func BenchmarkFieldSqrt(b *testing.B) {
var dat [32]byte
var f, tmp Field
- rand.Read(dat[:])
+ _, err := rand.Read(dat[:])
+ if err != nil {
+ b.Error(err)
+ }
f.SetB32(dat[:])
for i := 0; i < b.N; i++ {
f.Sqrt(&tmp)
@@ -28,7 +31,10 @@ func BenchmarkFieldSqrt(b *testing.B) {
func BenchmarkFieldInv(b *testing.B) {
var dat [32]byte
var f, tmp Field
- rand.Read(dat[:])
+ _, err := rand.Read(dat[:])
+ if err != nil {
+ b.Error(err)
+ }
f.SetB32(dat[:])
for i := 0; i < b.N; i++ {
f.Inv(&tmp)
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go
index d87c487..15641a7 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/sig_test.go
@@ -2,52 +2,153 @@ package secp256k1go
import (
"encoding/hex"
- "strconv"
+ "fmt"
"testing"
)
+func TestSigForceLowS(t *testing.T) {
+ // forceLowS was a hardcoded parameter that forced the Signature's S point
+ // to be "low", i.e. below the half-order of the curve.
+ // This is necessary to break signature malleability and should always be on,
+ // so the forceLowS parameter was removed, and the current code is equivalent
+ // to forceLowS=true.
+
+ // Check that forceLowS forces the S point to the lower half of the curve
+ var sec, msg, non Number
+ sec.SetHex("7A642C99F7719F57D8F4BEB11A303AFCD190243A51CED8782CA6D3DBE014D146")
+ msg.SetHex("DD72CBF2203C1A55A411EEC4404AF2AFB2FE942C434B23EFE46E9F04DA8433CA")
+ non.SetHex("9F3CD9AB0F32911BFDE39AD155F527192CE5ED1F51447D63C4F154C118DA598E")
+
+ // The signature when forceLowS is true (not malleable)
+ sigHexLowS := "8c20a668be1b5a910205de46095023fe4823a3757f4417114168925f28193bff520ce833da9313d726f2a4d481e3195a5dd8e935a6c7f4dc260ed4c66ebe6da7"
+ // The signature when forceLowS is false (malleable)
+ // "8c20a668be1b5a910205de46095023fe4823a3757f4417114168925f28193bffadf317cc256cec28d90d5b2b7e1ce6a45cd5f3b10880ab5f99c389c66177d39a"
+
+ var sig Signature
+ var recid int
+ res := sig.Sign(&sec, &msg, &non, &recid)
+ if res != 1 {
+ t.Error("res failed", res)
+ return
+ }
+
+ if recid != 0 {
+ t.Error("recid should be 0 because of forceLowS")
+ }
+ if sigHexLowS != hex.EncodeToString(sig.Bytes()) {
+ t.Error("forceLowS did not modify the S point as expected")
+ }
+}
+
func TestSigRecover(t *testing.T) {
- var vs = [][6]string{
+ cases := []struct {
+ r string
+ s string
+ msg string
+ recid int
+ x string
+ y string
+ }{
{
- "6028b9e3a31c9e725fcbd7d5d16736aaaafcc9bf157dfb4be62bcbcf0969d488",
- "036d4a36fa235b8f9f815aa6f5457a607f956a71a035bf0970d8578bf218bb5a",
- "9cff3da1a4f86caf3683f865232c64992b5ed002af42b321b8d8a48420680487",
- "0",
- "56dc5df245955302893d8dda0677cc9865d8011bc678c7803a18b5f6faafec08",
- "54b5fbdcd8fac6468dac2de88fadce6414f5f3afbb103753e25161bef77705a6",
+ r: "6028b9e3a31c9e725fcbd7d5d16736aaaafcc9bf157dfb4be62bcbcf0969d488",
+ s: "036d4a36fa235b8f9f815aa6f5457a607f956a71a035bf0970d8578bf218bb5a",
+ msg: "9cff3da1a4f86caf3683f865232c64992b5ed002af42b321b8d8a48420680487",
+ recid: 0,
+ x: "56dc5df245955302893d8dda0677cc9865d8011bc678c7803a18b5f6faafec08",
+ y: "54b5fbdcd8fac6468dac2de88fadce6414f5f3afbb103753e25161bef77705a6",
},
{
- "b470e02f834a3aaafa27bd2b49e07269e962a51410f364e9e195c31351a05e50",
- "560978aed76de9d5d781f87ed2068832ed545f2b21bf040654a2daff694c8b09",
- "9ce428d58e8e4caf619dc6fc7b2c2c28f0561654d1f80f322c038ad5e67ff8a6",
- "1",
- "15b7e7d00f024bffcd2e47524bb7b7d3a6b251e23a3a43191ed7f0a418d9a578",
- "bf29a25e2d1f32c5afb18b41ae60112723278a8af31275965a6ec1d95334e840",
+ r: "b470e02f834a3aaafa27bd2b49e07269e962a51410f364e9e195c31351a05e50",
+ s: "560978aed76de9d5d781f87ed2068832ed545f2b21bf040654a2daff694c8b09",
+ msg: "9ce428d58e8e4caf619dc6fc7b2c2c28f0561654d1f80f322c038ad5e67ff8a6",
+ recid: 1,
+ x: "15b7e7d00f024bffcd2e47524bb7b7d3a6b251e23a3a43191ed7f0a418d9a578",
+ y: "bf29a25e2d1f32c5afb18b41ae60112723278a8af31275965a6ec1d95334e840",
},
}
- var sig Signature
- var pubkey, exp XY
- var msg Number
+ for _, tc := range cases {
+ t.Run(fmt.Sprintf("%s,%s", tc.r, tc.s), func(t *testing.T) {
+ var sig Signature
+ var pubkey, expectedPubkey XY
+ var msg Number
+
+ sig.R.SetHex(tc.r)
+ sig.S.SetHex(tc.s)
+ msg.SetHex(tc.msg)
+
+ expectedPubkey.X.SetHex(tc.x)
+ expectedPubkey.Y.SetHex(tc.y)
- for i := range vs {
- sig.R.SetHex(vs[i][0])
- sig.S.SetHex(vs[i][1])
- msg.SetHex(vs[i][2])
- rid, _ := strconv.ParseInt(vs[i][3], 10, 32)
- exp.X.SetHex(vs[i][4])
- exp.Y.SetHex(vs[i][5])
-
- if sig.Recover(&pubkey, &msg, int(rid)) {
- if !exp.X.Equals(&pubkey.X) {
- t.Error("X mismatch at vector", i)
+ if sig.Recover(&pubkey, &msg, tc.recid) {
+ if !expectedPubkey.X.Equals(&pubkey.X) {
+ t.Error("X mismatch")
+ }
+ if !expectedPubkey.Y.Equals(&pubkey.Y) {
+ t.Error("Y mismatch")
+ }
+ } else {
+ t.Error("sig.Recover failed")
}
- if !exp.Y.Equals(&pubkey.Y) {
- t.Error("Y mismatch at vector", i)
+ })
+ }
+}
+
+func TestSigRecover2(t *testing.T) {
+ cases := []struct {
+ msg string
+ sig string
+ pubkey string
+ recoverFails bool
+ }{
+ {
+ msg: "016b81623cf98f45879f3a48fa34af77dde44b2ffa0ddd2bf9edb386f76ec0ef",
+ sig: "d2a8ec2b29ce3cf3e6048296188adff4b5dfcb337c1d1157f28654e445bb940b4e47d6b0c7ba43d072bf8618775f123a435e8d1a150cb39bbb1aa80da8c57ea100",
+ pubkey: "03c0b0e24d55255f7aefe3da7a947a63028b573f45356a9c22e9a3c103fd00c3d1",
+ },
+
+ {
+ msg: "176b81623cf98f45879f3a48fa34af77dde44b2ffa0ddd2bf9edb386f76ec0ef",
+ sig: "d2a8ec2b20ce3cf3e6048296188adff4b5dfcb337c1d1157f28654e445bb940b4e47d6b0c7ba43d072bf8618775f123a435e8d1a150cb39bbb1aa80da8c57ea100",
+ pubkey: "03cee91b6d329e00c344ad5d67cfd00d885ec36e8975b5d9097738939cb8c08b31",
+ },
+ {
+ msg: "176b81623cf98f45879f3a48fa34af77dde44b2ffa0ddd2bf9edb386f76ec0ef",
+ sig: "d201ec2b29ce3cf3e6048296188adff4b5dfcb337c1d1157f28654e445bb940b4e47d6b0c7ba43d072bf8618775f123a435e8d1a150cb39bbb1aa80da8c57ea100",
+ recoverFails: true,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprintf("%s-%s", tc.msg, tc.sig), func(t *testing.T) {
+ var sig Signature
+ var pubkey XY
+ var msg Number
+
+ sigBytes, err := hex.DecodeString(tc.sig)
+ if err != nil {
+ t.Error("invalid sig hex")
}
- } else {
- t.Error("sig.recover fialed")
- }
+ recid := int(sigBytes[64])
+ sig.R.SetBytes(sigBytes[:32])
+ sig.S.SetBytes(sigBytes[32:64])
+ msg.SetHex(tc.msg)
+
+ if sig.Recover(&pubkey, &msg, recid) {
+ if tc.recoverFails {
+ t.Error("sig.Recover expected to fail")
+ }
+
+ pubkeyHex := hex.EncodeToString(pubkey.Bytes())
+ if tc.pubkey != pubkeyHex {
+ t.Errorf("pubkey does not match %s != %s", tc.pubkey, pubkeyHex)
+ }
+ } else {
+ if !tc.recoverFails {
+ t.Error("sig.Recover failed")
+ }
+ }
+ })
}
}
@@ -78,8 +179,14 @@ func TestSigVerify(t *testing.T) {
msg.SetHex("2c43a883f4edc2b66c67a7a355b9312a565bb3d33bb854af36a06669e2028377")
sig.R.SetHex("6b2fa9344462c958d4a674c2a42fbedf7d6159a5276eb658887e2e1b3915329b")
sig.S.SetHex("eddc6ea7f190c14a0aa74e41519d88d2681314f011d253665f301425caf86b86")
- xy, _ := hex.DecodeString("02a60d70cfba37177d8239d018185d864b2bdd0caf5e175fd4454cc006fd2d75ac")
- key.ParsePubkey(xy)
+ xy, err := hex.DecodeString("02a60d70cfba37177d8239d018185d864b2bdd0caf5e175fd4454cc006fd2d75ac")
+ if err != nil {
+ t.Fail()
+ }
+
+ if err := key.ParsePubkey(xy); err != nil {
+ t.Errorf("ParsePubkey failed: %v", err)
+ }
if !sig.Verify(&key, &msg) {
t.Error("sig.Verify 2")
}
@@ -96,27 +203,121 @@ func TestSigSign(t *testing.T) {
if res != 1 {
t.Error("res failed", res)
}
- if forceLowS {
- if recid != 0 {
- t.Error("recid failed", recid)
- }
- } else {
- if recid != 1 {
- t.Error("recid failed", recid)
- }
+ if recid != 0 {
+ t.Error("recid failed", recid)
}
non.SetHex("98f9d784ba6c5c77bb7323d044c0fc9f2b27baa0a5b0718fe88596cc56681980")
if sig.R.Cmp(&non.Int) != 0 {
t.Error("R failed", sig.R.String())
}
- if forceLowS {
- non.SetHex("1ca662aaefd6cc958ba4604fea999db133a75bf34c13334dabac7124ff0cfcc1")
- } else {
- non.SetHex("E3599D551029336A745B9FB01566624D870780F363356CEE1425ED67D1294480")
- }
+ non.SetHex("1ca662aaefd6cc958ba4604fea999db133a75bf34c13334dabac7124ff0cfcc1")
if sig.S.Cmp(&non.Int) != 0 {
t.Error("S failed", sig.S.String())
}
+ expectSig := "98f9d784ba6c5c77bb7323d044c0fc9f2b27baa0a5b0718fe88596cc566819801ca662aaefd6cc958ba4604fea999db133a75bf34c13334dabac7124ff0cfcc1"
+ if expectSig != hex.EncodeToString(sig.Bytes()) {
+ t.Error("signature doesnt match")
+ }
+}
+
+func TestSigSignRecover(t *testing.T) {
+ cases := []struct {
+ // inputs
+ seckey string
+ digest string
+ nonce string
+
+ // outputs
+ sig string
+ recid int
+ pubkey string
+ }{
+ {
+ seckey: "597e27368656cab3c82bfcf2fb074cefd8b6101781a27709ba1b326b738d2c5a",
+ digest: "001aa9e416aff5f3a3c7f9ae0811757cf54f393d50df861f5c33747954341aa7",
+ nonce: "01",
+ sig: "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179804641a7472bb90647fa60b4d30aef8c7279e4b68226f7b2713dab712ef122f8b",
+ recid: 1,
+ pubkey: "02df09821cff4874198a1dbdc462d224bd99728eeed024185879225762376132c7",
+ },
+
+ {
+ seckey: "597e27368656cab3c82bfcf2fb074cefd8b6101781a27709ba1b326b738d2c5a",
+ digest: "001aa9e416aff5f3a3c7f9ae0811757cf54f393d50df861f5c33747954341aa7",
+ nonce: "fe25",
+ sig: "ee38f27be5f3c4b8db875c0ffbc0232e93f622d16ede888508a4920ab51c3c9906ea7426c5e251e4bea76f06f554fa7798a49b7968b400fa981c51531a5748d8",
+ recid: 1,
+ pubkey: "02df09821cff4874198a1dbdc462d224bd99728eeed024185879225762376132c7",
+ },
+
+ {
+ seckey: "597e27368656cab3c82bfcf2fb074cefd8b6101781a27709ba1b326b738d2c5a",
+ digest: "001aa9e416aff5f3a3c7f9ae0811757cf54f393d50df861f5c33747954341aa7",
+ nonce: "fe250100",
+ sig: "d4d869ad39cb3a64fa1980b47d1f19bd568430d3f929e01c00f1e5b7c6840ba85e08d5781986ee72d1e8ebd4dd050386a64eee0256005626d2acbe3aefee9e25",
+ recid: 0,
+ pubkey: "02df09821cff4874198a1dbdc462d224bd99728eeed024185879225762376132c7",
+ },
+
+ {
+ seckey: "67a331669081d22624f16512ea61e1d44cb3f26af3333973d17e0e8d03733b78",
+ digest: "001aa9e416aff5f3a3c7f9ae0811757cf54f393d50df861f5c33747954341aa7",
+ nonce: "1e2501ac",
+ sig: "eeee743d79b40aaa52d9eeb48791b0ae81a2f425bf99cdbc84180e8ed429300d457e8d669dbff1716b123552baf6f6f0ef67f16c1d9ccd44e6785d4240022126",
+ recid: 1,
+ pubkey: "0270b763664593c5f84dfb20d23ef79530fc317e5ee2ece0d9c50f432f62426ff9",
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprintf("%s-%s-%s", tc.seckey, tc.digest, tc.nonce), func(t *testing.T) {
+ var sec, msg, non Number
+ var sig Signature
+ sec.SetHex(tc.seckey)
+ msg.SetHex(tc.digest)
+ non.SetHex(tc.nonce)
+
+ recid := 0
+ res := sig.Sign(&sec, &msg, &non, &recid)
+ if res != 1 {
+ t.Error("sig.Sign failed")
+ }
+
+ if recid != tc.recid {
+ t.Error("recid doesn't match")
+ }
+
+ sigHex := hex.EncodeToString(sig.Bytes())
+ if tc.sig != sigHex {
+ t.Errorf("signature doesn't match %s != %s", tc.sig, sigHex)
+ }
+
+ skb, err := hex.DecodeString(tc.seckey)
+ if err != nil {
+ t.Error(err)
+ }
+
+ derivedPk := GeneratePublicKey(skb)
+ if derivedPk == nil {
+ t.Error("failed to derive pubkey from seckey")
+ }
+ derivedPkHex := hex.EncodeToString(derivedPk)
+ if tc.pubkey != derivedPkHex {
+ t.Errorf("derived pubkey doesn't match %s != %s", tc.pubkey, derivedPkHex)
+ }
+
+ var pk XY
+ ret := sig.Recover(&pk, &msg, recid)
+ if !ret {
+ t.Error("sig.Recover failed")
+ }
+
+ pkHex := hex.EncodeToString(pk.Bytes())
+ if tc.pubkey != pkHex {
+ t.Errorf("recovered pubkey doesn't match %s != %s", tc.pubkey, pkHex)
+ }
+ })
+ }
}
func BenchmarkVerify(b *testing.B) {
diff --git a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go
index b392785..0be6aff 100644
--- a/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/xyz_test.go
@@ -4,7 +4,7 @@ import (
"testing"
)
-func _TestGejDouble(t *testing.T) {
+func TestGejDouble(t *testing.T) {
var a, aExp, r XYZ
a.X.SetHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")
a.Y.SetHex("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8")
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/block_body_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/block_body_skyencoder_test.go
new file mode 100644
index 0000000..6566e4b
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/block_body_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyBlockBodyForEncodeTest() *BlockBody {
+ var obj BlockBody
+ return &obj
+}
+
+func newRandomBlockBodyForEncodeTest(t *testing.T, rand *mathrand.Rand) *BlockBody {
+ var obj BlockBody
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenBlockBodyForEncodeTest(t *testing.T, rand *mathrand.Rand) *BlockBody {
+ var obj BlockBody
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilBlockBodyForEncodeTest(t *testing.T, rand *mathrand.Rand) *BlockBody {
+ var obj BlockBody
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderBlockBody(t *testing.T, obj *BlockBody) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with an enc:"-" struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeBlockBody(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeBlockBody() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeBlockBody(obj)
+ if err != nil {
+ t.Fatalf("encodeBlockBody failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeBlockBody produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeBlockBody()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeBlockBodyToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeBlockBodyToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 BlockBody
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 BlockBody
+ if n, err := decodeBlockBody(data2, &obj3); err != nil {
+ t.Fatalf("decodeBlockBody failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeBlockBody bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeBlockBody()")
+ }
+
+ // Decode, excess buffer
+ var obj4 BlockBody
+ n, err := decodeBlockBody(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeBlockBody failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeBlockBody bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeBlockBody bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeBlockBody()")
+ }
+
+ // DecodeExact
+ var obj5 BlockBody
+ if err := decodeBlockBodyExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeBlockBody failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeBlockBody()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeBlockBody(data4, &obj3); err != nil {
+ t.Fatalf("decodeBlockBody failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeBlockBody bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderBlockBody(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *BlockBody
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyBlockBodyForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomBlockBodyForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenBlockBodyForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilBlockBodyForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderBlockBody(t, tc.obj)
+ })
+ }
+}
+
+func decodeBlockBodyExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj BlockBody
+ if _, err := decodeBlockBody(buf, &obj); err == nil {
+ t.Fatal("decodeBlockBody: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeBlockBody: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeBlockBodyExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj BlockBody
+ if err := decodeBlockBodyExact(buf, &obj); err == nil {
+ t.Fatal("decodeBlockBodyExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeBlockBodyExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderBlockBodyDecodeErrors(t *testing.T, k int, tag string, obj *BlockBody) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with an enc:"-" struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeBlockBody(obj)
+ buf, err := encodeBlockBody(obj)
+ if err != nil {
+ t.Fatalf("encodeBlockBody failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeBlockBodyExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeBlockBodyExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeBlockBodyExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeBlockBodyExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeBlockBodyExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderBlockBodyDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyBlockBodyForEncodeTest()
+ fullObj := newRandomBlockBodyForEncodeTest(t, rand)
+ testSkyencoderBlockBodyDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderBlockBodyDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/block_header_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/block_header_skyencoder_test.go
new file mode 100644
index 0000000..a77ad39
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/block_header_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyBlockHeaderForEncodeTest() *BlockHeader {
+ var obj BlockHeader
+ return &obj
+}
+
+func newRandomBlockHeaderForEncodeTest(t *testing.T, rand *mathrand.Rand) *BlockHeader {
+ var obj BlockHeader
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenBlockHeaderForEncodeTest(t *testing.T, rand *mathrand.Rand) *BlockHeader {
+ var obj BlockHeader
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilBlockHeaderForEncodeTest(t *testing.T, rand *mathrand.Rand) *BlockHeader {
+ var obj BlockHeader
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderBlockHeader(t *testing.T, obj *BlockHeader) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+		// Skip fields disabled with an enc:"-" or enc:"-,..." struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeBlockHeader(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeBlockHeader() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeBlockHeader(obj)
+ if err != nil {
+ t.Fatalf("encodeBlockHeader failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeBlockHeader produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeBlockHeader()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeBlockHeaderToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeBlockHeaderToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 BlockHeader
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 BlockHeader
+ if n, err := decodeBlockHeader(data2, &obj3); err != nil {
+ t.Fatalf("decodeBlockHeader failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeBlockHeader bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeBlockHeader()")
+ }
+
+ // Decode, excess buffer
+ var obj4 BlockHeader
+ n, err := decodeBlockHeader(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeBlockHeader failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeBlockHeader bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeBlockHeader bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeBlockHeader()")
+ }
+
+ // DecodeExact
+ var obj5 BlockHeader
+ if err := decodeBlockHeaderExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeBlockHeader failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeBlockHeader()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeBlockHeader(data4, &obj3); err != nil {
+ t.Fatalf("decodeBlockHeader failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeBlockHeader bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderBlockHeader(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *BlockHeader
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyBlockHeaderForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomBlockHeaderForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenBlockHeaderForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilBlockHeaderForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderBlockHeader(t, tc.obj)
+ })
+ }
+}
+
+func decodeBlockHeaderExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj BlockHeader
+ if _, err := decodeBlockHeader(buf, &obj); err == nil {
+ t.Fatal("decodeBlockHeader: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeBlockHeader: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeBlockHeaderExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj BlockHeader
+ if err := decodeBlockHeaderExact(buf, &obj); err == nil {
+ t.Fatal("decodeBlockHeaderExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeBlockHeaderExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderBlockHeaderDecodeErrors(t *testing.T, k int, tag string, obj *BlockHeader) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+		// Skip fields disabled with an enc:"-" or enc:"-,..." struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeBlockHeader(obj)
+ buf, err := encodeBlockHeader(obj)
+ if err != nil {
+ t.Fatalf("encodeBlockHeader failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeBlockHeaderExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeBlockHeaderExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeBlockHeaderExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeBlockHeaderExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeBlockHeaderExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderBlockHeaderDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyBlockHeaderForEncodeTest()
+ fullObj := newRandomBlockHeaderForEncodeTest(t, rand)
+ testSkyencoderBlockHeaderDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderBlockHeaderDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/block_test.go b/vendor/github.com/skycoin/skycoin/src/coin/block_test.go
index c17aa97..faa6df4 100644
--- a/vendor/github.com/skycoin/skycoin/src/coin/block_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/coin/block_test.go
@@ -1,25 +1,38 @@
-// build ignore
-
package coin
import (
"errors"
"fmt"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"github.com/skycoin/skycoin/src/cipher"
"github.com/skycoin/skycoin/src/testutil"
+)
- "github.com/stretchr/testify/assert"
+var (
+ genPublic, genSecret = cipher.GenerateKeyPair()
+ genAddress = cipher.AddressFromPubKey(genPublic)
+ _genTime uint64 = 1000
+ _genCoins uint64 = 1000e6
+ _genCoinHours uint64 = 1000 * 1000
)
+func tNow() uint64 {
+ return uint64(time.Now().UTC().Unix())
+}
+
+func feeCalc(t *Transaction) (uint64, error) {
+ return 0, nil
+}
+
func badFeeCalc(t *Transaction) (uint64, error) {
return 0, errors.New("Bad")
}
-func makeNewBlock(uxHash cipher.SHA256) (*Block, error) {
+func makeNewBlock(t *testing.T, uxHash cipher.SHA256) *Block {
body := BlockBody{
Transactions: Transactions{Transaction{}},
}
@@ -34,13 +47,15 @@ func makeNewBlock(uxHash cipher.SHA256) (*Block, error) {
PrevHash: cipher.SHA256{},
BodyHash: body.Hash(),
}}
- return NewBlock(prev, 100+20, uxHash, Transactions{Transaction{}}, _feeCalc)
+ b, err := NewBlock(prev, 100+20, uxHash, Transactions{Transaction{}}, feeCalc)
+ require.NoError(t, err)
+ return b
}
func addTransactionToBlock(t *testing.T, b *Block) Transaction {
- tx := makeTransaction(t)
- b.Body.Transactions = append(b.Body.Transactions, tx)
- return tx
+ txn := makeTransaction(t)
+ b.Body.Transactions = append(b.Body.Transactions, txn)
+ return txn
}
func TestNewBlock(t *testing.T) {
@@ -53,10 +68,10 @@ func TestNewBlock(t *testing.T) {
require.EqualError(t, err, fmt.Sprintf("Invalid transaction fees: Bad"))
// no txns panics
- _, err = NewBlock(prev, 133, uxHash, nil, _feeCalc)
+ _, err = NewBlock(prev, 133, uxHash, nil, feeCalc)
require.EqualError(t, err, "Refusing to create block with no transactions")
- _, err = NewBlock(prev, 133, uxHash, Transactions{}, _feeCalc)
+ _, err = NewBlock(prev, 133, uxHash, Transactions{}, feeCalc)
require.EqualError(t, err, "Refusing to create block with no transactions")
// valid block is fine
@@ -66,35 +81,31 @@ func TestNewBlock(t *testing.T) {
return fee, nil
})
require.NoError(t, err)
- assert.Equal(t, b.Body.Transactions, txns)
- assert.Equal(t, b.Head.Fee, fee*uint64(len(txns)))
- assert.Equal(t, b.Body, BlockBody{Transactions: txns})
- assert.Equal(t, b.Head.PrevHash, prev.HashHeader())
- assert.Equal(t, b.Head.Time, currentTime)
- assert.Equal(t, b.Head.BkSeq, prev.Head.BkSeq+1)
- assert.Equal(t, b.Head.UxHash, uxHash)
+ require.Equal(t, b.Body.Transactions, txns)
+ require.Equal(t, b.Head.Fee, fee*uint64(len(txns)))
+ require.Equal(t, b.Body, BlockBody{Transactions: txns})
+ require.Equal(t, b.Head.PrevHash, prev.HashHeader())
+ require.Equal(t, b.Head.Time, currentTime)
+ require.Equal(t, b.Head.BkSeq, prev.Head.BkSeq+1)
+ require.Equal(t, b.Head.UxHash, uxHash)
}
func TestBlockHashHeader(t *testing.T) {
uxHash := testutil.RandSHA256(t)
- b, err := makeNewBlock(uxHash)
- require.NoError(t, err)
- assert.Equal(t, b.HashHeader(), b.Head.Hash())
- assert.NotEqual(t, b.HashHeader(), cipher.SHA256{})
+ b := makeNewBlock(t, uxHash)
+ require.Equal(t, b.HashHeader(), b.Head.Hash())
+ require.NotEqual(t, b.HashHeader(), cipher.SHA256{})
}
-func TestBlockHashBody(t *testing.T) {
+func TestBlockBodyHash(t *testing.T) {
uxHash := testutil.RandSHA256(t)
- b, err := makeNewBlock(uxHash)
- require.NoError(t, err)
- assert.Equal(t, b.HashBody(), b.Body.Hash())
- hb := b.HashBody()
+ b := makeNewBlock(t, uxHash)
+ hb := b.Body.Hash()
hashes := b.Body.Transactions.Hashes()
- tx := addTransactionToBlock(t, b)
- assert.NotEqual(t, b.HashBody(), hb)
- hashes = append(hashes, tx.Hash())
- assert.Equal(t, b.HashBody(), cipher.Merkle(hashes))
- assert.Equal(t, b.HashBody(), b.Body.Hash())
+ txn := addTransactionToBlock(t, b)
+ require.NotEqual(t, hb, b.Body.Hash())
+ hashes = append(hashes, txn.Hash())
+ require.Equal(t, b.Body.Hash(), cipher.Merkle(hashes))
}
func TestNewGenesisBlock(t *testing.T) {
@@ -109,19 +120,20 @@ func TestNewGenesisBlock(t *testing.T) {
require.Equal(t, cipher.SHA256{}, gb.Head.UxHash)
require.Equal(t, 1, len(gb.Body.Transactions))
- tx := gb.Body.Transactions[0]
- require.Len(t, tx.In, 0)
- require.Len(t, tx.Sigs, 0)
- require.Len(t, tx.Out, 1)
-
- require.Equal(t, genAddress, tx.Out[0].Address)
- require.Equal(t, _genCoins, tx.Out[0].Coins)
- require.Equal(t, _genCoins, tx.Out[0].Hours)
+ txn := gb.Body.Transactions[0]
+ require.Len(t, txn.In, 0)
+ require.Len(t, txn.Sigs, 0)
+ require.Len(t, txn.Out, 1)
+
+ require.Equal(t, genAddress, txn.Out[0].Address)
+ require.Equal(t, _genCoins, txn.Out[0].Coins)
+ require.Equal(t, _genCoins, txn.Out[0].Hours)
}
func TestCreateUnspent(t *testing.T) {
- tx := Transaction{}
- tx.PushOutput(genAddress, 11e6, 255)
+ txn := Transaction{}
+ err := txn.PushOutput(genAddress, 11e6, 255)
+ require.NoError(t, err)
bh := BlockHeader{
Time: tNow(),
BkSeq: uint64(1),
@@ -140,52 +152,53 @@ func TestCreateUnspent(t *testing.T) {
{
"index overflow",
10,
- errors.New("Transaction out index is overflow"),
+ errors.New("Transaction out index overflows transaction outputs"),
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
- uxout, err := CreateUnspent(bh, tx, tc.txIndex)
+ uxout, err := CreateUnspent(bh, txn, tc.txIndex)
require.Equal(t, tc.err, err)
if err != nil {
return
}
- assertUnspent(t, bh, tx, tc.txIndex, uxout)
+ requireUnspent(t, bh, txn, tc.txIndex, uxout)
})
}
}
func TestCreateUnspents(t *testing.T) {
- tx := Transaction{}
- tx.PushOutput(genAddress, 11e6, 255)
+ txn := Transaction{}
+ err := txn.PushOutput(genAddress, 11e6, 255)
+ require.NoError(t, err)
bh := BlockHeader{
Time: tNow(),
BkSeq: uint64(1),
}
- uxouts := CreateUnspents(bh, tx)
- assert.Equal(t, len(uxouts), 1)
- assertValidUnspents(t, bh, tx, uxouts)
+ uxouts := CreateUnspents(bh, txn)
+ require.Equal(t, len(uxouts), 1)
+ requireValidUnspents(t, bh, txn, uxouts)
}
-func assertUnspent(t *testing.T, bh BlockHeader, tx Transaction, txIndex int, ux UxOut) {
- assert.Equal(t, bh.Time, ux.Head.Time)
- assert.Equal(t, bh.BkSeq, ux.Head.BkSeq)
- assert.Equal(t, tx.Hash(), ux.Body.SrcTransaction)
- assert.Equal(t, tx.Out[txIndex].Address, ux.Body.Address)
- assert.Equal(t, tx.Out[txIndex].Coins, ux.Body.Coins)
- assert.Equal(t, tx.Out[txIndex].Hours, ux.Body.Hours)
+func requireUnspent(t *testing.T, bh BlockHeader, txn Transaction, txIndex int, ux UxOut) {
+ require.Equal(t, bh.Time, ux.Head.Time)
+ require.Equal(t, bh.BkSeq, ux.Head.BkSeq)
+ require.Equal(t, txn.Hash(), ux.Body.SrcTransaction)
+ require.Equal(t, txn.Out[txIndex].Address, ux.Body.Address)
+ require.Equal(t, txn.Out[txIndex].Coins, ux.Body.Coins)
+ require.Equal(t, txn.Out[txIndex].Hours, ux.Body.Hours)
}
-func assertValidUnspents(t *testing.T, bh BlockHeader, tx Transaction,
+func requireValidUnspents(t *testing.T, bh BlockHeader, txn Transaction,
uxo UxArray) {
- assert.Equal(t, len(tx.Out), len(uxo))
+ require.Equal(t, len(txn.Out), len(uxo))
for i, ux := range uxo {
- assert.Equal(t, bh.Time, ux.Head.Time)
- assert.Equal(t, bh.BkSeq, ux.Head.BkSeq)
- assert.Equal(t, tx.Hash(), ux.Body.SrcTransaction)
- assert.Equal(t, tx.Out[i].Address, ux.Body.Address)
- assert.Equal(t, tx.Out[i].Coins, ux.Body.Coins)
- assert.Equal(t, tx.Out[i].Hours, ux.Body.Hours)
+ require.Equal(t, bh.Time, ux.Head.Time)
+ require.Equal(t, bh.BkSeq, ux.Head.BkSeq)
+ require.Equal(t, txn.Hash(), ux.Body.SrcTransaction)
+ require.Equal(t, txn.Out[i].Address, ux.Body.Address)
+ require.Equal(t, txn.Out[i].Coins, ux.Body.Coins)
+ require.Equal(t, txn.Out[i].Hours, ux.Body.Hours)
}
}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/coin_test.go b/vendor/github.com/skycoin/skycoin/src/coin/coin_test.go
deleted file mode 100644
index a371fe3..0000000
--- a/vendor/github.com/skycoin/skycoin/src/coin/coin_test.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package coin
-
-import (
- "encoding/hex"
- "math/rand"
- "testing"
-
- "github.com/skycoin/skycoin/src/cipher"
- "github.com/skycoin/skycoin/src/util/utc"
-)
-
-var (
- genPublic, genSecret = cipher.GenerateKeyPair()
- genAddress = cipher.AddressFromPubKey(genPublic)
- _genTime uint64 = 1000
- _genCoins uint64 = 1000e6
- _genCoinHours uint64 = 1000 * 1000
-)
-
-func tNow() uint64 {
- return uint64(utc.UnixNow())
-}
-
-func _feeCalc(t *Transaction) (uint64, error) {
- return 0, nil
-}
-
-func TestAddress1(t *testing.T) {
- a := "02fa939957e9fc52140e180264e621c2576a1bfe781f88792fb315ca3d1786afb8"
- b, err := hex.DecodeString(a)
- if err != nil {
- t.Fatal(err)
- }
- addr := cipher.AddressFromPubKey(cipher.NewPubKey(b))
- _ = addr
-
- ///func SignHash(hash cipher.SHA256, sec SecKey) (Sig, error) {
-
-}
-
-func TestAddress2(t *testing.T) {
- a := "5a42c0643bdb465d90bf673b99c14f5fa02db71513249d904573d2b8b63d353d"
- b, err := hex.DecodeString(a)
- if err != nil {
- t.Fail()
- }
-
- if len(b) != 32 {
- t.Fail()
- }
-
- seckey := cipher.NewSecKey(b)
- pubkey := cipher.PubKeyFromSecKey(seckey)
- addr := cipher.AddressFromPubKey(pubkey)
- _ = addr
-
- ///func SignHash(hash cipher.SHA256, sec SecKey) (Sig, error) {
-
-}
-
-//TODO: 100% coverage over cryptographic functions
-
-//Crypto Functions to Test
-//func ChkSig(address Address, hash cipher.SHA256, sig Sig) error {
-//func SignHash(hash cipher.SHA256, sec SecKey) (Sig, error) {
-//func cipher.PubKeyFromSecKey(seckey SecKey) (PubKey) {
-//func PubKeyFromSig(sig Sig, hash cipher.SHA256) (PubKey, error) {
-//func VerifySignature(pubkey PubKey, sig Sig, hash cipher.SHA256) error {
-//func GenerateKeyPair() (PubKey, SecKey) {
-//func GenerateDeterministicKeyPair(seed []byte) (PubKey, SecKey) {
-//func testSecKey(seckey SecKey) error {
-
-func TestCrypto1(t *testing.T) {
- for i := 0; i < 10; i++ {
- _, seckey := cipher.GenerateKeyPair()
- if cipher.TestSecKey(seckey) != nil {
- t.Fatal("CRYPTOGRAPHIC INTEGRITY CHECK FAILED")
- }
- }
-}
-
-//test signatures
-func TestCrypto2(t *testing.T) {
- a := "5a42c0643bdb465d90bf673b99c14f5fa02db71513249d904573d2b8b63d353d"
- b, err := hex.DecodeString(a)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(b) != 32 {
- t.Fatal()
- }
-
- seckey := cipher.NewSecKey(b)
- pubkey := cipher.PubKeyFromSecKey(seckey)
-
- addr := cipher.AddressFromPubKey(pubkey)
- _ = addr
-
- test := []byte("test message")
- hash := cipher.SumSHA256(test)
- err = cipher.TestSecKeyHash(seckey, hash)
- if err != nil {
- t.Fatal()
- }
-
-}
-
-func _gensec() cipher.SecKey {
- _, s := cipher.GenerateKeyPair()
- return s
-}
-
-func _gpub(s cipher.SecKey) cipher.PubKey {
- return cipher.PubKeyFromSecKey(s)
-}
-
-func _gaddr(s cipher.SecKey) cipher.Address {
- return cipher.AddressFromSecKey(s)
-}
-
-func _gaddrA1(S []cipher.SecKey) []cipher.Address {
- A := make([]cipher.Address, len(S))
- for i := 0; i < len(S); i++ {
- A[i] = cipher.AddressFromSecKey(S[i])
- }
- return A
-}
-
-func _gaddrA2(S []cipher.SecKey, O []UxOut) []int {
- A := _gaddrA1(S)
- var M map[cipher.Address]int //address to int
- for i, a := range A {
- M[a] = i
- }
-
- I := make([]int, len(O)) //output to seckey/address index
- for i, o := range O {
- I[i] = M[o.Body.Address]
- }
-
- return I
-}
-
-func _gaddrA3(S []cipher.SecKey) map[cipher.Address]int {
- A := _gaddrA1(S)
- M := make(map[cipher.Address]int) //address to int
- for i, a := range A {
- M[a] = i
- }
- return M
-}
-
-//assign amt to n bins in randomized manner
-func _randBins(amt uint64, n int) []uint64 {
- bins := make([]uint64, n)
- max := amt / (4 * uint64(n))
- for i := 0; amt > 0; i++ {
- //amount going into this bin
- b := 1 + (uint64(rand.Int63()) % max)
- if b > amt {
- b = amt
- }
- bins[i%n] += b
- amt -= b
- }
- return bins
-}
-
-/*
-TODO: check block header of new block
-TODO: check that coins are not created or destroyed
-TODO:
-*/
-
-//create 4096 addresses
-//send addreses randomly between each other over 1024 blocks
-
-/*
-func TestBlockchain1(t *testing.T) {
-
- var S []SecKey
- for i := 0; i < 4096; i++ {
- S = append(S, _gensec())
- }
-
- A := _gaddr_a1(S)
-
- var bc *Blockchain = NewBlockchain(A[0])
-
- for i := 0; i < 1024; i++ {
- b := bc.NewBlock()
-
- //unspent outputs
- U := make([]UxOut, len(bc.Unspent))
- copy(U, bc.Unspent)
-
- //for _,Ux := range U {
- // if Ux.Hours() < Ux.Body.
- //}
- //I := _gaddr_a2(S,U)
- M := _gaddr_a3(S, U)
- var num_in int = 1 + rand.Intn(len(U))%15
- var num_out int = 1 + rand.Int()%30
-
- var t Transaction
-
- SigIdx := make([]int, num_in)
-
- var v1 uint64 = 0
- var v2 uint64 = 0
- for i := 0; i < num_in; i++ {
- idx := rand.Intn(len(U))
- var Ux UxOut = U[idx] //unspent output to spend
- U[idx], U = U[len(U)-1], U[:len(U)-1] //remove output idx
-
- v1 += Ux.Body.Coins
- v2 += Ux.Body.Hours
-
- //index of signature that must sign input
- SigIdx[i] = M[Ux.Body.Address] //signature index
-
- var ti TransactionInput
- ti.SigIdx = uint16(i)
- ti.UxOut = Ux.Hash()
- t.TxIn = append(t.TxIn, ti) //append input to transaction
- }
-
- //assign coins to output addresses in random manner
-
- //check that inputs/outputs sum
- v1_ := v1
- v2_ := v2
-
- vo1 := _rand_bins(v1, num_out)
- vo2 := _rand_bins(v2, num_out)
-
- var v1_t uint64
- var v2_t uint64
- for i, _ := range vo1 {
- v1_t += vo1[i]
- v2_t += vo2[i]
- }
-
- if v1_t != v1_ {
- log.Panic()
- }
- if v2_t != v2_ {
- log.Panic()
- }
- //log.Printf("%v %v, %v %v \n", v1_,v2_, v1_t, v2_t)
-
- for i := 0; i < num_out; i++ {
- var to TransactionOutput
- to.Address = A[rand.Intn(len(A))]
- to.Coins = vo1[i]
- to.Hours = vo2[i]
- t.TxOut = append(t.TxOut, to)
- }
-
- //transaction complete, now set signatures
- for i := 0; i < num_in; i++ {
- t.SetSig(uint16(i), S[SigIdx[i]])
- }
- t.UpdateHeader() //sets hash
-
- err := bc.AppendTransaction(&b, t)
- if err != nil {
- log.Panic(err)
- }
-
- fmt.Printf("Block %v \n", i)
- err = bc.ExecuteBlock(b)
- if err != nil {
- log.Panic(err)
- }
-
- }
-}
-*/
-
-/*
-func TestGetListenPort(t *testing.T) {
- // No connectionMirror found
- assert.Equal(t, getListenPort(addr), uint16(0))
- // No mirrorConnection map exists
- ConnectionMirrors[addr] = uint32(4)
- assert.Panics(t, func() { getListenPort(addr) })
- // Everything is good
- m := make(map[string]uint16)
- mirrorConnections[uint32(4)] = m
- m[addrIP] = uint16(6667)
- assert.Equal(t, getListenPort(addr), uint16(6667))
-
- // cleanup
- delete(mirrorConnections, uint32(4))
- delete(ConnectionMirrors, addr)
-}
-*/
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/transaction_inputs_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/transaction_inputs_skyencoder_test.go
new file mode 100644
index 0000000..00f5f58
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/transaction_inputs_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyTransactionInputsForEncodeTest() *transactionInputs {
+ var obj transactionInputs
+ return &obj
+}
+
+func newRandomTransactionInputsForEncodeTest(t *testing.T, rand *mathrand.Rand) *transactionInputs {
+ var obj transactionInputs
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenTransactionInputsForEncodeTest(t *testing.T, rand *mathrand.Rand) *transactionInputs {
+ var obj transactionInputs
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilTransactionInputsForEncodeTest(t *testing.T, rand *mathrand.Rand) *transactionInputs {
+ var obj transactionInputs
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderTransactionInputs(t *testing.T, obj *transactionInputs) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeTransactionInputs(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeTransactionInputs() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeTransactionInputs(obj)
+ if err != nil {
+ t.Fatalf("encodeTransactionInputs failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeTransactionInputs produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeTransactionInputs()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeTransactionInputsToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeTransactionInputsToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 transactionInputs
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 transactionInputs
+ if n, err := decodeTransactionInputs(data2, &obj3); err != nil {
+ t.Fatalf("decodeTransactionInputs failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeTransactionInputs bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransactionInputs()")
+ }
+
+ // Decode, excess buffer
+ var obj4 transactionInputs
+ n, err := decodeTransactionInputs(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeTransactionInputs failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeTransactionInputs bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeTransactionInputs bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransactionInputs()")
+ }
+
+ // DecodeExact
+ var obj5 transactionInputs
+ if err := decodeTransactionInputsExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeTransactionInputs failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransactionInputs()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeTransactionInputs(data4, &obj3); err != nil {
+ t.Fatalf("decodeTransactionInputs failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeTransactionInputs bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderTransactionInputs(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *transactionInputs
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyTransactionInputsForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomTransactionInputsForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenTransactionInputsForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilTransactionInputsForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderTransactionInputs(t, tc.obj)
+ })
+ }
+}
+
+func decodeTransactionInputsExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj transactionInputs
+ if _, err := decodeTransactionInputs(buf, &obj); err == nil {
+ t.Fatal("decodeTransactionInputs: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeTransactionInputs: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeTransactionInputsExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj transactionInputs
+ if err := decodeTransactionInputsExact(buf, &obj); err == nil {
+ t.Fatal("decodeTransactionInputsExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeTransactionInputsExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderTransactionInputsDecodeErrors(t *testing.T, k int, tag string, obj *transactionInputs) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeTransactionInputs(obj)
+ buf, err := encodeTransactionInputs(obj)
+ if err != nil {
+ t.Fatalf("encodeTransactionInputs failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeTransactionInputsExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeTransactionInputsExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeTransactionInputsExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeTransactionInputsExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeTransactionInputsExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderTransactionInputsDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyTransactionInputsForEncodeTest()
+ fullObj := newRandomTransactionInputsForEncodeTest(t, rand)
+ testSkyencoderTransactionInputsDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderTransactionInputsDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/transaction_outputs_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/transaction_outputs_skyencoder_test.go
new file mode 100644
index 0000000..4c134c9
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/transaction_outputs_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyTransactionOutputsForEncodeTest() *transactionOutputs {
+ var obj transactionOutputs
+ return &obj
+}
+
+func newRandomTransactionOutputsForEncodeTest(t *testing.T, rand *mathrand.Rand) *transactionOutputs {
+ var obj transactionOutputs
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenTransactionOutputsForEncodeTest(t *testing.T, rand *mathrand.Rand) *transactionOutputs {
+ var obj transactionOutputs
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilTransactionOutputsForEncodeTest(t *testing.T, rand *mathrand.Rand) *transactionOutputs {
+ var obj transactionOutputs
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderTransactionOutputs(t *testing.T, obj *transactionOutputs) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeTransactionOutputs(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeTransactionOutputs() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeTransactionOutputs(obj)
+ if err != nil {
+ t.Fatalf("encodeTransactionOutputs failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeTransactionOutputs produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeTransactionOutputs()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeTransactionOutputsToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeTransactionOutputsToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 transactionOutputs
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 transactionOutputs
+ if n, err := decodeTransactionOutputs(data2, &obj3); err != nil {
+ t.Fatalf("decodeTransactionOutputs failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeTransactionOutputs bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransactionOutputs()")
+ }
+
+ // Decode, excess buffer
+ var obj4 transactionOutputs
+ n, err := decodeTransactionOutputs(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeTransactionOutputs failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeTransactionOutputs bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeTransactionOutputs bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransactionOutputs()")
+ }
+
+ // DecodeExact
+ var obj5 transactionOutputs
+ if err := decodeTransactionOutputsExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeTransactionOutputs failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransactionOutputs()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeTransactionOutputs(data4, &obj3); err != nil {
+ t.Fatalf("decodeTransactionOutputs failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeTransactionOutputs bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderTransactionOutputs(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *transactionOutputs
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyTransactionOutputsForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomTransactionOutputsForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenTransactionOutputsForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilTransactionOutputsForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderTransactionOutputs(t, tc.obj)
+ })
+ }
+}
+
+func decodeTransactionOutputsExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj transactionOutputs
+ if _, err := decodeTransactionOutputs(buf, &obj); err == nil {
+ t.Fatal("decodeTransactionOutputs: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeTransactionOutputs: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeTransactionOutputsExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj transactionOutputs
+ if err := decodeTransactionOutputsExact(buf, &obj); err == nil {
+ t.Fatal("decodeTransactionOutputsExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeTransactionOutputsExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderTransactionOutputsDecodeErrors(t *testing.T, k int, tag string, obj *transactionOutputs) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeTransactionOutputs(obj)
+ buf, err := encodeTransactionOutputs(obj)
+ if err != nil {
+ t.Fatalf("encodeTransactionOutputs failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeTransactionOutputsExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeTransactionOutputsExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeTransactionOutputsExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeTransactionOutputsExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeTransactionOutputsExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderTransactionOutputsDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyTransactionOutputsForEncodeTest()
+ fullObj := newRandomTransactionOutputsForEncodeTest(t, rand)
+ testSkyencoderTransactionOutputsDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderTransactionOutputsDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/transaction_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/transaction_skyencoder_test.go
new file mode 100644
index 0000000..f31d02d
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/transaction_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyTransactionForEncodeTest() *Transaction {
+ var obj Transaction
+ return &obj
+}
+
+func newRandomTransactionForEncodeTest(t *testing.T, rand *mathrand.Rand) *Transaction {
+ var obj Transaction
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenTransactionForEncodeTest(t *testing.T, rand *mathrand.Rand) *Transaction {
+ var obj Transaction
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilTransactionForEncodeTest(t *testing.T, rand *mathrand.Rand) *Transaction {
+ var obj Transaction
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderTransaction(t *testing.T, obj *Transaction) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeTransaction(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeTransaction() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeTransaction(obj)
+ if err != nil {
+ t.Fatalf("encodeTransaction failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeTransaction produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeTransaction()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeTransactionToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeTransactionToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 Transaction
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 Transaction
+ if n, err := decodeTransaction(data2, &obj3); err != nil {
+ t.Fatalf("decodeTransaction failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeTransaction bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransaction()")
+ }
+
+ // Decode, excess buffer
+ var obj4 Transaction
+ n, err := decodeTransaction(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeTransaction failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeTransaction bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeTransaction bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransaction()")
+ }
+
+ // DecodeExact
+ var obj5 Transaction
+ if err := decodeTransactionExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeTransaction failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeTransaction()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeTransaction(data4, &obj3); err != nil {
+ t.Fatalf("decodeTransaction failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeTransaction bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderTransaction(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *Transaction
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyTransactionForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomTransactionForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenTransactionForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilTransactionForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderTransaction(t, tc.obj)
+ })
+ }
+}
+
+func decodeTransactionExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj Transaction
+ if _, err := decodeTransaction(buf, &obj); err == nil {
+ t.Fatal("decodeTransaction: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeTransaction: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeTransactionExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj Transaction
+ if err := decodeTransactionExact(buf, &obj); err == nil {
+ t.Fatal("decodeTransactionExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeTransactionExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderTransactionDecodeErrors(t *testing.T, k int, tag string, obj *Transaction) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeTransaction(obj)
+ buf, err := encodeTransaction(obj)
+ if err != nil {
+ t.Fatalf("encodeTransaction failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeTransactionExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeTransactionExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeTransactionExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeTransactionExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeTransactionExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderTransactionDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyTransactionForEncodeTest()
+ fullObj := newRandomTransactionForEncodeTest(t, rand)
+ testSkyencoderTransactionDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderTransactionDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go b/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go
index 29c5e62..84f839d 100644
--- a/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/coin/transactions_test.go
@@ -2,6 +2,7 @@ package coin
import (
"bytes"
+ "encoding/hex"
"errors"
"math"
"sort"
@@ -14,24 +15,52 @@ import (
"github.com/skycoin/skycoin/src/cipher/encoder"
"github.com/skycoin/skycoin/src/testutil"
_require "github.com/skycoin/skycoin/src/testutil/require"
+ "github.com/skycoin/skycoin/src/util/mathutil"
)
-func makeTransactionFromUxOut(ux UxOut, s cipher.SecKey) Transaction {
- tx := Transaction{}
- tx.PushInput(ux.Hash())
- tx.PushOutput(makeAddress(), 1e6, 50)
- tx.PushOutput(makeAddress(), 5e6, 50)
- tx.SignInputs([]cipher.SecKey{s})
- tx.UpdateHeader()
- return tx
+func makeTransactionFromUxOuts(t *testing.T, uxs []UxOut, secs []cipher.SecKey) Transaction {
+ require.Equal(t, len(uxs), len(secs))
+
+ txn := Transaction{}
+
+ err := txn.PushOutput(makeAddress(), 1e6, 50)
+ require.NoError(t, err)
+ err = txn.PushOutput(makeAddress(), 5e6, 50)
+ require.NoError(t, err)
+
+ for _, ux := range uxs {
+ err = txn.PushInput(ux.Hash())
+ require.NoError(t, err)
+ }
+
+ txn.SignInputs(secs)
+
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ return txn
+}
+
+func makeTransactionFromUxOut(t *testing.T, ux UxOut, s cipher.SecKey) Transaction {
+ return makeTransactionFromUxOuts(t, []UxOut{ux}, []cipher.SecKey{s})
}
func makeTransaction(t *testing.T) Transaction {
ux, s := makeUxOutWithSecret(t)
- return makeTransactionFromUxOut(ux, s)
+ return makeTransactionFromUxOut(t, ux, s)
+}
+
+func makeTransactionMultipleInputs(t *testing.T, n int) (Transaction, []cipher.SecKey) {
+ uxs := make([]UxOut, n)
+ secs := make([]cipher.SecKey, n)
+ for i := 0; i < n; i++ {
+ ux, s := makeUxOutWithSecret(t)
+ uxs[i] = ux
+ secs[i] = s
+ }
+ return makeTransactionFromUxOuts(t, uxs, secs), secs
}
-func makeTransactions(t *testing.T, n int) Transactions { // nolint: unparam
+func makeTransactions(t *testing.T, n int) Transactions { //nolint:unparam
txns := make(Transactions, n)
for i := range txns {
txns[i] = makeTransaction(t)
@@ -44,359 +73,477 @@ func makeAddress() cipher.Address {
return cipher.AddressFromPubKey(p)
}
-func copyTransaction(tx Transaction) Transaction {
+func copyTransaction(txn Transaction) Transaction {
txo := Transaction{}
- txo.Length = tx.Length
- txo.Type = tx.Type
- txo.InnerHash = tx.InnerHash
- txo.Sigs = make([]cipher.Sig, len(tx.Sigs))
- copy(txo.Sigs, tx.Sigs)
- txo.In = make([]cipher.SHA256, len(tx.In))
- copy(txo.In, tx.In)
- txo.Out = make([]TransactionOutput, len(tx.Out))
- copy(txo.Out, tx.Out)
+ txo.Length = txn.Length
+ txo.Type = txn.Type
+ txo.InnerHash = txn.InnerHash
+ txo.Sigs = make([]cipher.Sig, len(txn.Sigs))
+ copy(txo.Sigs, txn.Sigs)
+ txo.In = make([]cipher.SHA256, len(txn.In))
+ copy(txo.In, txn.In)
+ txo.Out = make([]TransactionOutput, len(txn.Out))
+ copy(txo.Out, txn.Out)
return txo
}
func TestTransactionVerify(t *testing.T) {
// Mismatch header hash
- tx := makeTransaction(t)
- tx.InnerHash = cipher.SHA256{}
- testutil.RequireError(t, tx.Verify(), "Invalid header hash")
+ txn := makeTransaction(t)
+ txn.InnerHash = cipher.SHA256{}
+ testutil.RequireError(t, txn.Verify(), "InnerHash does not match computed hash")
// No inputs
- tx = makeTransaction(t)
- tx.In = make([]cipher.SHA256, 0)
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "No inputs")
+ txn = makeTransaction(t)
+ txn.In = make([]cipher.SHA256, 0)
+ err := txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "No inputs")
// No outputs
- tx = makeTransaction(t)
- tx.Out = make([]TransactionOutput, 0)
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "No outputs")
+ txn = makeTransaction(t)
+ txn.Out = make([]TransactionOutput, 0)
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "No outputs")
// Invalid number of sigs
- tx = makeTransaction(t)
- tx.Sigs = make([]cipher.Sig, 0)
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Invalid number of signatures")
- tx.Sigs = make([]cipher.Sig, 20)
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Invalid number of signatures")
+ txn = makeTransaction(t)
+ txn.Sigs = make([]cipher.Sig, 0)
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "Invalid number of signatures")
+ txn.Sigs = make([]cipher.Sig, 20)
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "Invalid number of signatures")
// Too many sigs & inputs
- tx = makeTransaction(t)
- tx.Sigs = make([]cipher.Sig, math.MaxUint16)
- tx.In = make([]cipher.SHA256, math.MaxUint16)
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Too many signatures and inputs")
+ txn = makeTransaction(t)
+ txn.Sigs = make([]cipher.Sig, math.MaxUint16+1)
+ txn.In = make([]cipher.SHA256, math.MaxUint16+1)
+ testutil.RequireError(t, txn.Verify(), "Too many signatures and inputs")
// Duplicate inputs
ux, s := makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
- tx.PushInput(tx.In[0])
- tx.Sigs = nil
- tx.SignInputs([]cipher.SecKey{s, s})
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Duplicate spend")
+ txn = makeTransactionFromUxOut(t, ux, s)
+ err = txn.PushInput(txn.In[0])
+ require.NoError(t, err)
+ txn.Sigs = nil
+ txn.SignInputs([]cipher.SecKey{s, s})
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "Duplicate spend")
// Duplicate outputs
- tx = makeTransaction(t)
- to := tx.Out[0]
- tx.PushOutput(to.Address, to.Coins, to.Hours)
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Duplicate output in transaction")
+ txn = makeTransaction(t)
+ to := txn.Out[0]
+ err = txn.PushOutput(to.Address, to.Coins, to.Hours)
+ require.NoError(t, err)
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "Duplicate output in transaction")
// Invalid signature, empty
- tx = makeTransaction(t)
- tx.Sigs[0] = cipher.Sig{}
- testutil.RequireError(t, tx.Verify(), "Failed to recover public key")
+ txn = makeTransaction(t)
+ txn.Sigs[0] = cipher.Sig{}
+ testutil.RequireError(t, txn.Verify(), "Unsigned input in transaction")
+
+ // Invalid signature, not empty
+ // A stable invalid signature must be used because random signatures could appear valid
+ // Note: Transaction.Verify() only checks that the signature is a minimally valid signature
+ badSig := "9a0f86874a4d9541f58a1de4db1c1b58765a868dc6f027445d0a2a8a7bddd1c45ea559fcd7bef45e1b76ccdaf8e50bbebd952acbbea87d1cb3f7a964bc89bf1ed5"
+ txn = makeTransaction(t)
+ txn.Sigs[0] = cipher.MustSigFromHex(badSig)
+ testutil.RequireError(t, txn.Verify(), "Failed to recover pubkey from signature")
+
// We can't check here for other invalid signatures:
// - Signatures signed by someone else, spending coins they don't own
- // - Signature is for wrong hash
+ // - Signatures signing a different message
// This must be done by blockchain tests, because we need the address
// from the unspent being spent
+ // The verification here only checks that the signature is valid at all
// Output coins are 0
- tx = makeTransaction(t)
- tx.Out[0].Coins = 0
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Zero coin output")
+ txn = makeTransaction(t)
+ txn.Out[0].Coins = 0
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "Zero coin output")
// Output coin overflow
- tx = makeTransaction(t)
- tx.Out[0].Coins = math.MaxUint64 - 3e6
- tx.UpdateHeader()
- testutil.RequireError(t, tx.Verify(), "Output coins overflow")
+ txn = makeTransaction(t)
+ txn.Out[0].Coins = math.MaxUint64 - 3e6
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ testutil.RequireError(t, txn.Verify(), "Output coins overflow")
// Output coins are not multiples of 1e6 (valid, decimal restriction is not enforced here)
- tx = makeTransaction(t)
- tx.Out[0].Coins += 10
- tx.UpdateHeader()
- tx.Sigs = nil
- tx.SignInputs([]cipher.SecKey{genSecret})
- require.NotEqual(t, tx.Out[0].Coins%1e6, uint64(0))
- require.NoError(t, tx.Verify())
+ txn = makeTransaction(t)
+ txn.Out[0].Coins += 10
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ txn.Sigs = nil
+ txn.SignInputs([]cipher.SecKey{genSecret})
+ require.NotEqual(t, txn.Out[0].Coins%1e6, uint64(0))
+ require.NoError(t, txn.Verify())
// Valid
- tx = makeTransaction(t)
- tx.Out[0].Coins = 10e6
- tx.Out[1].Coins = 1e6
- tx.UpdateHeader()
- require.Nil(t, tx.Verify())
+ txn = makeTransaction(t)
+ txn.Out[0].Coins = 10e6
+ txn.Out[1].Coins = 1e6
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
+ require.NoError(t, txn.Verify())
+}
+
+func TestTransactionVerifyUnsigned(t *testing.T) {
+ txn, _ := makeTransactionMultipleInputs(t, 2)
+ err := txn.VerifyUnsigned()
+ testutil.RequireError(t, err, "Unsigned transaction must contain a null signature")
+
+ // Invalid signature, not empty
+ // A stable invalid signature must be used because random signatures could appear valid
+ // Note: Transaction.Verify() only checks that the signature is a minimally valid signature
+ badSig := "9a0f86874a4d9541f58a1de4db1c1b58765a868dc6f027445d0a2a8a7bddd1c45ea559fcd7bef45e1b76ccdaf8e50bbebd952acbbea87d1cb3f7a964bc89bf1ed5"
+ txn, _ = makeTransactionMultipleInputs(t, 2)
+ txn.Sigs[0] = cipher.Sig{}
+ txn.Sigs[1] = cipher.MustSigFromHex(badSig)
+ testutil.RequireError(t, txn.VerifyUnsigned(), "Failed to recover pubkey from signature")
+
+ txn.Sigs = nil
+ err = txn.VerifyUnsigned()
+ testutil.RequireError(t, err, "Invalid number of signatures")
+
+ // Transaction is unsigned if at least 1 signature is null
+ txn, _ = makeTransactionMultipleInputs(t, 3)
+ require.True(t, len(txn.Sigs) > 1)
+ txn.Sigs[0] = cipher.Sig{}
+ err = txn.VerifyUnsigned()
+ require.NoError(t, err)
+
+ // Transaction is unsigned if all signatures are null
+ for i := range txn.Sigs {
+ txn.Sigs[i] = cipher.Sig{}
+ }
+ err = txn.VerifyUnsigned()
+ require.NoError(t, err)
}
func TestTransactionVerifyInput(t *testing.T) {
// Invalid uxIn args
- tx := makeTransaction(t)
- _require.PanicsWithLogMessage(t, "tx.In != uxIn", func() {
- tx.VerifyInput(nil)
+ txn := makeTransaction(t)
+ _require.PanicsWithLogMessage(t, "txn.In != uxIn", func() {
+ _ = txn.VerifyInputSignatures(nil) //nolint:errcheck
})
- _require.PanicsWithLogMessage(t, "tx.In != uxIn", func() {
- tx.VerifyInput(UxArray{})
+ _require.PanicsWithLogMessage(t, "txn.In != uxIn", func() {
+ _ = txn.VerifyInputSignatures(UxArray{}) //nolint:errcheck
})
- _require.PanicsWithLogMessage(t, "tx.In != uxIn", func() {
- tx.VerifyInput(make(UxArray, 3))
+ _require.PanicsWithLogMessage(t, "txn.In != uxIn", func() {
+ _ = txn.VerifyInputSignatures(make(UxArray, 3)) //nolint:errcheck
})
- // tx.In != tx.Sigs
+ // txn.In != txn.Sigs
ux, s := makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
- tx.Sigs = []cipher.Sig{}
- _require.PanicsWithLogMessage(t, "tx.In != tx.Sigs", func() {
- tx.VerifyInput(UxArray{ux})
+ txn = makeTransactionFromUxOut(t, ux, s)
+ txn.Sigs = []cipher.Sig{}
+ _require.PanicsWithLogMessage(t, "txn.In != txn.Sigs", func() {
+ _ = txn.VerifyInputSignatures(UxArray{ux}) //nolint:errcheck
})
ux, s = makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
- tx.Sigs = append(tx.Sigs, cipher.Sig{})
- _require.PanicsWithLogMessage(t, "tx.In != tx.Sigs", func() {
- tx.VerifyInput(UxArray{ux})
+ txn = makeTransactionFromUxOut(t, ux, s)
+ txn.Sigs = append(txn.Sigs, cipher.Sig{})
+ _require.PanicsWithLogMessage(t, "txn.In != txn.Sigs", func() {
+ _ = txn.VerifyInputSignatures(UxArray{ux}) //nolint:errcheck
})
- // tx.InnerHash != tx.HashInner()
+ // txn.InnerHash != txn.HashInner()
ux, s = makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
- tx.InnerHash = cipher.SHA256{}
+ txn = makeTransactionFromUxOut(t, ux, s)
+ txn.InnerHash = cipher.SHA256{}
_require.PanicsWithLogMessage(t, "Invalid Tx Inner Hash", func() {
- tx.VerifyInput(UxArray{ux})
+ _ = txn.VerifyInputSignatures(UxArray{ux}) //nolint:errcheck
})
- // tx.In does not match uxIn hashes
+ // txn.In does not match uxIn hashes
ux, s = makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
+ txn = makeTransactionFromUxOut(t, ux, s)
_require.PanicsWithLogMessage(t, "Ux hash mismatch", func() {
- tx.VerifyInput(UxArray{UxOut{}})
+ _ = txn.VerifyInputSignatures(UxArray{UxOut{}}) //nolint:errcheck
})
- // Invalid signature
+ // Unsigned txn
ux, s = makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
- tx.Sigs[0] = cipher.Sig{}
- err := tx.VerifyInput(UxArray{ux})
+ txn = makeTransactionFromUxOut(t, ux, s)
+ txn.Sigs[0] = cipher.Sig{}
+ err := txn.VerifyInputSignatures(UxArray{ux})
+ testutil.RequireError(t, err, "Unsigned input in transaction")
+
+ // Signature signed by someone else
+ ux, _ = makeUxOutWithSecret(t)
+ _, s2 := makeUxOutWithSecret(t)
+ txn = makeTransactionFromUxOut(t, ux, s2)
+ err = txn.VerifyInputSignatures(UxArray{ux})
testutil.RequireError(t, err, "Signature not valid for output being spent")
// Valid
ux, s = makeUxOutWithSecret(t)
- tx = makeTransactionFromUxOut(ux, s)
- err = tx.VerifyInput(UxArray{ux})
+ txn = makeTransactionFromUxOut(t, ux, s)
+ err = txn.VerifyInputSignatures(UxArray{ux})
require.NoError(t, err)
}
func TestTransactionPushInput(t *testing.T) {
- tx := &Transaction{}
+ txn := &Transaction{}
ux := makeUxOut(t)
- require.Equal(t, tx.PushInput(ux.Hash()), uint16(0))
- require.Equal(t, len(tx.In), 1)
- require.Equal(t, tx.In[0], ux.Hash())
- tx.In = append(tx.In, make([]cipher.SHA256, math.MaxUint16)...)
+ require.NoError(t, txn.PushInput(ux.Hash()))
+ require.Equal(t, len(txn.In), 1)
+ require.Equal(t, txn.In[0], ux.Hash())
+ txn.In = append(txn.In, make([]cipher.SHA256, math.MaxUint16)...)
ux = makeUxOut(t)
- require.Panics(t, func() { tx.PushInput(ux.Hash()) })
+ err := txn.PushInput(ux.Hash())
+ testutil.RequireError(t, err, "Max transaction inputs reached")
}
func TestTransactionPushOutput(t *testing.T) {
- tx := &Transaction{}
+ txn := &Transaction{}
a := makeAddress()
- tx.PushOutput(a, 100, 150)
- require.Equal(t, len(tx.Out), 1)
- require.Equal(t, tx.Out[0], TransactionOutput{
+ err := txn.PushOutput(a, 100, 150)
+ require.NoError(t, err)
+ require.Equal(t, len(txn.Out), 1)
+ require.Equal(t, txn.Out[0], TransactionOutput{
Address: a,
Coins: 100,
Hours: 150,
})
for i := 1; i < 20; i++ {
a := makeAddress()
- tx.PushOutput(a, uint64(i*100), uint64(i*50))
- require.Equal(t, len(tx.Out), i+1)
- require.Equal(t, tx.Out[i], TransactionOutput{
+ err := txn.PushOutput(a, uint64(i*100), uint64(i*50))
+ require.NoError(t, err)
+ require.Equal(t, len(txn.Out), i+1)
+ require.Equal(t, txn.Out[i], TransactionOutput{
Address: a,
Coins: uint64(i * 100),
Hours: uint64(i * 50),
})
}
+
+ txn.Out = append(txn.Out, make([]TransactionOutput, math.MaxUint16-len(txn.Out))...)
+ err = txn.PushOutput(a, 999, 999)
+ testutil.RequireError(t, err, "Max transaction outputs reached")
+}
+
+func TestTransactionSignInput(t *testing.T) {
+ txn, seckeys := makeTransactionMultipleInputs(t, 3)
+ require.True(t, txn.IsFullySigned())
+
+ // Input is already signed
+ err := txn.SignInput(seckeys[0], 0)
+ testutil.RequireError(t, err, "Input already signed")
+ require.True(t, txn.IsFullySigned())
+
+ // Input is not signed
+ txn.Sigs[1] = cipher.Sig{}
+ require.False(t, txn.IsFullySigned())
+ err = txn.SignInput(seckeys[1], 1)
+ require.NoError(t, err)
+ require.True(t, txn.IsFullySigned())
+ err = txn.SignInput(seckeys[1], 1)
+ testutil.RequireError(t, err, "Input already signed")
+
+ // Transaction has no sigs; sigs array is initialized
+ txn.Sigs = nil
+ require.False(t, txn.IsFullySigned())
+ err = txn.SignInput(seckeys[2], 2)
+ require.NoError(t, err)
+ require.False(t, txn.IsFullySigned())
+ require.Len(t, txn.Sigs, 3)
+ require.True(t, txn.Sigs[0].Null())
+ require.True(t, txn.Sigs[1].Null())
+ require.False(t, txn.Sigs[2].Null())
+
+ // SignInputs on a partially signed transaction fails
+ require.Panics(t, func() {
+ txn.SignInputs(seckeys)
+ })
+
+ // Signing the rest of the inputs individually works
+ err = txn.SignInput(seckeys[1], 1)
+ require.NoError(t, err)
+ require.False(t, txn.IsFullySigned())
+ err = txn.SignInput(seckeys[0], 0)
+ require.NoError(t, err)
+ require.True(t, txn.IsFullySigned())
+
+ // Can use SignInputs on allocated array of empty sigs
+ txn.Sigs = make([]cipher.Sig, 3)
+ txn.SignInputs(seckeys)
+ require.True(t, txn.IsFullySigned())
}
func TestTransactionSignInputs(t *testing.T) {
- tx := &Transaction{}
+ txn := &Transaction{}
// Panics if txns already signed
- tx.Sigs = append(tx.Sigs, cipher.Sig{})
- require.Panics(t, func() { tx.SignInputs([]cipher.SecKey{}) })
+ txn.Sigs = append(txn.Sigs, cipher.Sig{})
+ require.Panics(t, func() { txn.SignInputs([]cipher.SecKey{}) })
// Panics if not enough keys
- tx = &Transaction{}
+ txn = &Transaction{}
ux, s := makeUxOutWithSecret(t)
- tx.PushInput(ux.Hash())
+ err := txn.PushInput(ux.Hash())
+ require.NoError(t, err)
ux2, s2 := makeUxOutWithSecret(t)
- tx.PushInput(ux2.Hash())
- tx.PushOutput(makeAddress(), 40, 80)
- require.Equal(t, len(tx.Sigs), 0)
- require.Panics(t, func() { tx.SignInputs([]cipher.SecKey{s}) })
- require.Equal(t, len(tx.Sigs), 0)
+ err = txn.PushInput(ux2.Hash())
+ require.NoError(t, err)
+ err = txn.PushOutput(makeAddress(), 40, 80)
+ require.NoError(t, err)
+ require.Equal(t, len(txn.Sigs), 0)
+ require.Panics(t, func() { txn.SignInputs([]cipher.SecKey{s}) })
+ require.Equal(t, len(txn.Sigs), 0)
// Valid signing
- h := tx.HashInner()
- require.NotPanics(t, func() { tx.SignInputs([]cipher.SecKey{s, s2}) })
- require.Equal(t, len(tx.Sigs), 2)
- require.Equal(t, tx.HashInner(), h)
- p := cipher.PubKeyFromSecKey(s)
+ h := txn.HashInner()
+ require.NotPanics(t, func() { txn.SignInputs([]cipher.SecKey{s, s2}) })
+ require.Equal(t, len(txn.Sigs), 2)
+ h2 := txn.HashInner()
+ require.Equal(t, h2, h)
+ p := cipher.MustPubKeyFromSecKey(s)
a := cipher.AddressFromPubKey(p)
- p = cipher.PubKeyFromSecKey(s2)
+ p = cipher.MustPubKeyFromSecKey(s2)
a2 := cipher.AddressFromPubKey(p)
- require.Nil(t, cipher.ChkSig(a, cipher.AddSHA256(h, tx.In[0]), tx.Sigs[0]))
- require.Nil(t, cipher.ChkSig(a2, cipher.AddSHA256(h, tx.In[1]), tx.Sigs[1]))
- require.NotNil(t, cipher.ChkSig(a, h, tx.Sigs[1]))
- require.NotNil(t, cipher.ChkSig(a2, h, tx.Sigs[0]))
+ require.NoError(t, cipher.VerifyAddressSignedHash(a, txn.Sigs[0], cipher.AddSHA256(h, txn.In[0])))
+ require.NoError(t, cipher.VerifyAddressSignedHash(a2, txn.Sigs[1], cipher.AddSHA256(h, txn.In[1])))
+ require.Error(t, cipher.VerifyAddressSignedHash(a, txn.Sigs[1], h))
+ require.Error(t, cipher.VerifyAddressSignedHash(a2, txn.Sigs[0], h))
}
func TestTransactionHash(t *testing.T) {
- tx := makeTransaction(t)
- require.NotEqual(t, tx.Hash(), cipher.SHA256{})
- require.NotEqual(t, tx.HashInner(), tx.Hash())
+ txn := makeTransaction(t)
+ h := txn.Hash()
+ h2 := txn.HashInner()
+ require.NotEqual(t, h, cipher.SHA256{})
+ require.NotEqual(t, h2, h)
}
func TestTransactionUpdateHeader(t *testing.T) {
- tx := makeTransaction(t)
- h := tx.InnerHash
- tx.InnerHash = cipher.SHA256{}
- tx.UpdateHeader()
- require.NotEqual(t, tx.InnerHash, cipher.SHA256{})
- require.Equal(t, tx.InnerHash, h)
- require.Equal(t, tx.InnerHash, tx.HashInner())
+ txn := makeTransaction(t)
+ h := txn.InnerHash
+ txn.InnerHash = cipher.SHA256{}
+ err := txn.UpdateHeader()
+ require.NoError(t, err)
+ require.NotEqual(t, txn.InnerHash, cipher.SHA256{})
+ require.Equal(t, txn.InnerHash, h)
+ require.Equal(t, txn.InnerHash, txn.HashInner())
}
func TestTransactionHashInner(t *testing.T) {
- tx := makeTransaction(t)
+ txn := makeTransaction(t)
- h := tx.HashInner()
- require.NotEqual(t, h, cipher.SHA256{})
+ require.NotEqual(t, cipher.SHA256{}, txn.HashInner())
- // If tx.In is changed, hash should change
- tx2 := copyTransaction(tx)
+ // If txn.In is changed, inner hash should change
+ txn2 := copyTransaction(txn)
ux := makeUxOut(t)
- tx2.In[0] = ux.Hash()
- require.NotEqual(t, tx, tx2)
- require.Equal(t, tx2.In[0], ux.Hash())
- require.NotEqual(t, tx.HashInner(), tx2.HashInner())
+ txn2.In[0] = ux.Hash()
+ require.NotEqual(t, txn, txn2)
+ require.Equal(t, txn2.In[0], ux.Hash())
+ require.NotEqual(t, txn.HashInner(), txn2.HashInner())
- // If tx.Out is changed, hash should change
- tx2 = copyTransaction(tx)
+ // If txn.Out is changed, inner hash should change
+ txn2 = copyTransaction(txn)
a := makeAddress()
- tx2.Out[0].Address = a
- require.NotEqual(t, tx, tx2)
- require.Equal(t, tx2.Out[0].Address, a)
- require.NotEqual(t, tx.HashInner(), tx2.HashInner())
-
- // If tx.Head is changed, hash should not change
- tx2 = copyTransaction(tx)
- tx.Sigs = append(tx.Sigs, cipher.Sig{})
- require.Equal(t, tx.HashInner(), tx2.HashInner())
+ txn2.Out[0].Address = a
+ require.NotEqual(t, txn, txn2)
+ require.Equal(t, txn2.Out[0].Address, a)
+ require.NotEqual(t, txn.HashInner(), txn2.HashInner())
+
+ // If txn.Head is changed, inner hash should not change
+ txn2 = copyTransaction(txn)
+ txn.Sigs = append(txn.Sigs, cipher.Sig{})
+ require.Equal(t, txn.HashInner(), txn2.HashInner())
}
func TestTransactionSerialization(t *testing.T) {
- tx := makeTransaction(t)
- b := tx.Serialize()
- tx2, err := TransactionDeserialize(b)
+ txn := makeTransaction(t)
+ b, err := txn.Serialize()
+ require.NoError(t, err)
+ txn2, err := DeserializeTransaction(b)
+ require.NoError(t, err)
+ require.Equal(t, txn, txn2)
+
+ // Check reserializing deserialized txn
+ b2, err := txn2.Serialize()
+ require.NoError(t, err)
+ txn3, err := DeserializeTransaction(b2)
require.NoError(t, err)
- require.Equal(t, tx, tx2)
+ require.Equal(t, txn2, txn3)
+
+ // Check hex encode/decode followed by deserialize
+ s := hex.EncodeToString(b)
+ sb, err := hex.DecodeString(s)
+ require.NoError(t, err)
+ txn4, err := DeserializeTransaction(sb)
+ require.NoError(t, err)
+ require.Equal(t, txn2, txn4)
+
// Invalid deserialization
- require.Panics(t, func() { MustTransactionDeserialize([]byte{0x04}) })
-}
+ require.Panics(t, func() {
+ MustDeserializeTransaction([]byte{0x04})
+ })
-func TestTransactionOutputHours(t *testing.T) {
- tx := Transaction{}
- tx.PushOutput(makeAddress(), 1e6, 100)
- tx.PushOutput(makeAddress(), 1e6, 200)
- tx.PushOutput(makeAddress(), 1e6, 500)
- tx.PushOutput(makeAddress(), 1e6, 0)
- hours, err := tx.OutputHours()
+ // SerializeHex
+ x, err := txn.SerializeHex()
require.NoError(t, err)
- require.Equal(t, hours, uint64(800))
+ txn5, err := DeserializeTransactionHex(x)
+ require.NoError(t, err)
+ require.Equal(t, txn, txn5)
- tx.PushOutput(makeAddress(), 1e6, math.MaxUint64-700)
- _, err = tx.OutputHours()
- testutil.RequireError(t, err, "Transaction output hours overflow")
-}
+ // Invalid hex deserialization
+ require.Panics(t, func() {
+ MustDeserializeTransactionHex("foo")
+ })
-type outAddr struct {
- Addr cipher.Address
- Coins uint64
- Hours uint64
+ ss, err := txn.Serialize()
+ require.NoError(t, err)
+ require.Equal(t, ss, txn.MustSerialize())
+ sshh, err := txn.SerializeHex()
+ require.NoError(t, err)
+ require.Equal(t, sshh, txn.MustSerializeHex())
}
-func makeTx(s cipher.SecKey, ux *UxOut, outs []outAddr, tm uint64, seq uint64) (*Transaction, UxArray, error) {
- if ux == nil {
- // genesis block tx.
- tx := Transaction{}
- tx.PushOutput(outs[0].Addr, outs[0].Coins, outs[0].Hours)
- _, s = cipher.GenerateKeyPair()
- ux := UxOut{
- Head: UxHead{
- Time: 100,
- BkSeq: 0,
- },
- Body: UxBody{
- SrcTransaction: tx.InnerHash,
- Address: outs[0].Addr,
- Coins: outs[0].Coins,
- Hours: outs[0].Hours,
- },
- }
- return &tx, []UxOut{ux}, nil
- }
+func TestTransactionOutputHours(t *testing.T) {
+ txn := Transaction{}
+ err := txn.PushOutput(makeAddress(), 1e6, 100)
+ require.NoError(t, err)
+ err = txn.PushOutput(makeAddress(), 1e6, 200)
+ require.NoError(t, err)
+ err = txn.PushOutput(makeAddress(), 1e6, 500)
+ require.NoError(t, err)
+ err = txn.PushOutput(makeAddress(), 1e6, 0)
+ require.NoError(t, err)
+ hours, err := txn.OutputHours()
+ require.NoError(t, err)
+ require.Equal(t, hours, uint64(800))
- tx := Transaction{}
- tx.PushInput(ux.Hash())
- tx.SignInputs([]cipher.SecKey{s})
- for _, o := range outs {
- tx.PushOutput(o.Addr, o.Coins, o.Hours)
- }
- tx.UpdateHeader()
-
- uxo := make(UxArray, len(tx.Out))
- for i := range tx.Out {
- uxo[i] = UxOut{
- Head: UxHead{
- Time: tm,
- BkSeq: seq,
- },
- Body: UxBody{
- SrcTransaction: tx.Hash(),
- Address: tx.Out[i].Address,
- Coins: tx.Out[i].Coins,
- Hours: tx.Out[i].Hours,
- },
- }
- }
- return &tx, uxo, nil
+ err = txn.PushOutput(makeAddress(), 1e6, math.MaxUint64-700)
+ require.NoError(t, err)
+ _, err = txn.OutputHours()
+ testutil.RequireError(t, err, "Transaction output hours overflow")
}
func TestTransactionsSize(t *testing.T) {
txns := makeTransactions(t, 10)
- size := 0
- for _, tx := range txns {
- size += len(encoder.Serialize(&tx))
+ var size uint32
+ for _, txn := range txns {
+ encodedLen, err := mathutil.IntToUint32(len(encoder.Serialize(&txn)))
+ require.NoError(t, err)
+ size, err = mathutil.AddUint32(size, encodedLen)
+ require.NoError(t, err)
}
+
require.NotEqual(t, size, 0)
- require.Equal(t, txns.Size(), size)
+ s, err := txns.Size()
+ require.NoError(t, err)
+ require.Equal(t, s, size)
}
func TestTransactionsHashes(t *testing.T) {
@@ -413,50 +560,83 @@ func TestTransactionsHashes(t *testing.T) {
func TestTransactionsTruncateBytesTo(t *testing.T) {
txns := makeTransactions(t, 10)
- trunc := 0
+ var trunc uint32
for i := 0; i < len(txns)/2; i++ {
- trunc += txns[i].Size()
+ size, err := txns[i].Size()
+ require.NoError(t, err)
+ trunc, err = mathutil.AddUint32(trunc, size)
+ require.NoError(t, err)
}
+
// Truncating halfway
- txns2 := txns.TruncateBytesTo(trunc)
+ txns2, err := txns.TruncateBytesTo(trunc)
+ require.NoError(t, err)
require.Equal(t, len(txns2), len(txns)/2)
- require.Equal(t, txns2.Size(), trunc)
+ totalSize, err := txns2.Size()
+ require.NoError(t, err)
+ require.Equal(t, totalSize, trunc)
// Stepping into next boundary has same cutoff, must exceed
trunc++
- txns2 = txns.TruncateBytesTo(trunc)
+ txns2, err = txns.TruncateBytesTo(trunc)
+ require.NoError(t, err)
require.Equal(t, len(txns2), len(txns)/2)
- require.Equal(t, txns2.Size(), trunc-1)
+ totalSize, err = txns2.Size()
+ require.NoError(t, err)
+ require.Equal(t, totalSize, trunc-1)
// Moving to 1 before next level
- trunc += txns[5].Size() - 2
- txns2 = txns.TruncateBytesTo(trunc)
+ size5, err := txns[5].Size()
+ require.NoError(t, err)
+ require.True(t, size5 >= 2)
+ trunc, err = mathutil.AddUint32(trunc, size5-2)
+ require.NoError(t, err)
+ txns2, err = txns.TruncateBytesTo(trunc)
+ require.NoError(t, err)
require.Equal(t, len(txns2), len(txns)/2)
- require.Equal(t, txns2.Size(), trunc-txns[5].Size()+1)
+
+ totalSize, err = txns2.Size()
+ require.NoError(t, err)
+ size5, err = txns[5].Size()
+ require.NoError(t, err)
+ require.Equal(t, totalSize, trunc-size5+1)
// Moving to next level
trunc++
- txns2 = txns.TruncateBytesTo(trunc)
+ txns2, err = txns.TruncateBytesTo(trunc)
+ require.NoError(t, err)
require.Equal(t, len(txns2), len(txns)/2+1)
- require.Equal(t, txns2.Size(), trunc)
+ size, err := txns2.Size()
+ require.NoError(t, err)
+ require.Equal(t, size, trunc)
// Truncating to full available amt
- trunc = txns.Size()
- txns2 = txns.TruncateBytesTo(trunc)
+ trunc, err = txns.Size()
+ require.NoError(t, err)
+ txns2, err = txns.TruncateBytesTo(trunc)
+ require.NoError(t, err)
require.Equal(t, txns, txns2)
- require.Equal(t, txns2.Size(), trunc)
+ size, err = txns2.Size()
+ require.NoError(t, err)
+ require.Equal(t, size, trunc)
// Truncating over amount
trunc++
- txns2 = txns.TruncateBytesTo(trunc)
+ txns2, err = txns.TruncateBytesTo(trunc)
+ require.NoError(t, err)
require.Equal(t, txns, txns2)
- require.Equal(t, txns2.Size(), trunc-1)
+ size, err = txns2.Size()
+ require.NoError(t, err)
+ require.Equal(t, size, trunc-1)
// Truncating to 0
trunc = 0
- txns2 = txns.TruncateBytesTo(0)
+ txns2, err = txns.TruncateBytesTo(0)
+ require.NoError(t, err)
require.Equal(t, len(txns2), 0)
- require.Equal(t, txns2.Size(), trunc)
+ size, err = txns2.Size()
+ require.NoError(t, err)
+ require.Equal(t, size, trunc)
}
func TestVerifyTransactionCoinsSpending(t *testing.T) {
@@ -830,7 +1010,7 @@ func TestVerifyTransactionHoursSpending(t *testing.T) {
}
func TestTransactionsFees(t *testing.T) {
- calc := func(tx *Transaction) (uint64, error) {
+ calc := func(txn *Transaction) (uint64, error) {
return 1, nil
}
@@ -850,14 +1030,14 @@ func TestTransactionsFees(t *testing.T) {
require.Equal(t, uint64(2), fee)
// calc error
- failingCalc := func(tx *Transaction) (uint64, error) {
+ failingCalc := func(txn *Transaction) (uint64, error) {
return 0, errors.New("bad calc")
}
_, err = txns.Fees(failingCalc)
testutil.RequireError(t, err, "bad calc")
// summing of calculated fees overflows
- overflowCalc := func(tx *Transaction) (uint64, error) {
+ overflowCalc := func(txn *Transaction) (uint64, error) {
return math.MaxUint64, nil
}
@@ -870,15 +1050,14 @@ func TestSortTransactions(t *testing.T) {
var txns Transactions
for i := 0; i < n; i++ {
txn := Transaction{}
- txn.PushOutput(makeAddress(), 1e6, uint64(i*1e3))
- txn.UpdateHeader()
+ err := txn.PushOutput(makeAddress(), 1e6, uint64(i*1e3))
+ require.NoError(t, err)
+ err = txn.UpdateHeader()
+ require.NoError(t, err)
txns = append(txns, txn)
}
- var hashSortedTxns Transactions
- for _, txn := range txns {
- hashSortedTxns = append(hashSortedTxns, txn)
- }
+ hashSortedTxns := append(Transactions{}, txns...)
sort.Slice(hashSortedTxns, func(i, j int) bool {
ihash := hashSortedTxns[i].Hash()
@@ -946,8 +1125,35 @@ func TestSortTransactions(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
- txns := SortTransactions(tc.txns, tc.feeCalc)
+ txns, err := SortTransactions(tc.txns, tc.feeCalc)
+ require.NoError(t, err)
require.Equal(t, tc.sortedTxns, txns)
})
}
}
+
+func TestTransactionSignedUnsigned(t *testing.T) {
+ txn, _ := makeTransactionMultipleInputs(t, 2)
+ require.True(t, txn.IsFullySigned())
+ require.True(t, txn.hasNonNullSignature())
+ require.False(t, txn.IsFullyUnsigned())
+ require.False(t, txn.hasNullSignature())
+
+ txn.Sigs[1] = cipher.Sig{}
+ require.False(t, txn.IsFullySigned())
+ require.True(t, txn.hasNonNullSignature())
+ require.False(t, txn.IsFullyUnsigned())
+ require.True(t, txn.hasNullSignature())
+
+ txn.Sigs[0] = cipher.Sig{}
+ require.False(t, txn.IsFullySigned())
+ require.False(t, txn.hasNonNullSignature())
+ require.True(t, txn.IsFullyUnsigned())
+ require.True(t, txn.hasNullSignature())
+
+ txn.Sigs = nil
+ require.False(t, txn.IsFullySigned())
+ require.False(t, txn.hasNonNullSignature())
+ require.True(t, txn.IsFullyUnsigned())
+ require.False(t, txn.hasNullSignature())
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/ux_body_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/ux_body_skyencoder_test.go
new file mode 100644
index 0000000..84920a5
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/ux_body_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyUxBodyForEncodeTest() *UxBody {
+ var obj UxBody
+ return &obj
+}
+
+func newRandomUxBodyForEncodeTest(t *testing.T, rand *mathrand.Rand) *UxBody {
+ var obj UxBody
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenUxBodyForEncodeTest(t *testing.T, rand *mathrand.Rand) *UxBody {
+ var obj UxBody
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilUxBodyForEncodeTest(t *testing.T, rand *mathrand.Rand) *UxBody {
+ var obj UxBody
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderUxBody(t *testing.T, obj *UxBody) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeUxBody(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeUxBody() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeUxBody(obj)
+ if err != nil {
+ t.Fatalf("encodeUxBody failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeUxBody produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeUxBody()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeUxBodyToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeUxBodyToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 UxBody
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 UxBody
+ if n, err := decodeUxBody(data2, &obj3); err != nil {
+ t.Fatalf("decodeUxBody failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeUxBody bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeUxBody()")
+ }
+
+ // Decode, excess buffer
+ var obj4 UxBody
+ n, err := decodeUxBody(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeUxBody failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeUxBody bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeUxBody bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeUxBody()")
+ }
+
+ // DecodeExact
+ var obj5 UxBody
+ if err := decodeUxBodyExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeUxBody failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeUxBody()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeUxBody(data4, &obj3); err != nil {
+ t.Fatalf("decodeUxBody failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeUxBody bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderUxBody(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *UxBody
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyUxBodyForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomUxBodyForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenUxBodyForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilUxBodyForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderUxBody(t, tc.obj)
+ })
+ }
+}
+
+func decodeUxBodyExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj UxBody
+ if _, err := decodeUxBody(buf, &obj); err == nil {
+ t.Fatal("decodeUxBody: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeUxBody: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeUxBodyExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj UxBody
+ if err := decodeUxBodyExact(buf, &obj); err == nil {
+ t.Fatal("decodeUxBodyExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeUxBodyExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderUxBodyDecodeErrors(t *testing.T, k int, tag string, obj *UxBody) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeUxBody(obj)
+ buf, err := encodeUxBody(obj)
+ if err != nil {
+ t.Fatalf("encodeUxBody failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeUxBodyExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeUxBodyExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeUxBodyExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeUxBodyExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeUxBodyExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderUxBodyDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyUxBodyForEncodeTest()
+ fullObj := newRandomUxBodyForEncodeTest(t, rand)
+ testSkyencoderUxBodyDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderUxBodyDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/ux_head_skyencoder_test.go b/vendor/github.com/skycoin/skycoin/src/coin/ux_head_skyencoder_test.go
new file mode 100644
index 0000000..ddf75e9
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/coin/ux_head_skyencoder_test.go
@@ -0,0 +1,421 @@
+// Code generated by github.com/skycoin/skyencoder. DO NOT EDIT.
+
+package coin
+
+import (
+ "bytes"
+ "fmt"
+ mathrand "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/skycoin/encodertest"
+ "github.com/skycoin/skycoin/src/cipher/encoder"
+)
+
+func newEmptyUxHeadForEncodeTest() *UxHead {
+ var obj UxHead
+ return &obj
+}
+
+func newRandomUxHeadForEncodeTest(t *testing.T, rand *mathrand.Rand) *UxHead {
+ var obj UxHead
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 4,
+ MinRandLen: 1,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenUxHeadForEncodeTest(t *testing.T, rand *mathrand.Rand) *UxHead {
+ var obj UxHead
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: false,
+ EmptyMapNil: false,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func newRandomZeroLenNilUxHeadForEncodeTest(t *testing.T, rand *mathrand.Rand) *UxHead {
+ var obj UxHead
+ err := encodertest.PopulateRandom(&obj, rand, encodertest.PopulateRandomOptions{
+ MaxRandLen: 0,
+ MinRandLen: 0,
+ EmptySliceNil: true,
+ EmptyMapNil: true,
+ })
+ if err != nil {
+ t.Fatalf("encodertest.PopulateRandom failed: %v", err)
+ }
+ return &obj
+}
+
+func testSkyencoderUxHead(t *testing.T, obj *UxHead) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ // encodeSize
+
+ n1 := encoder.Size(obj)
+ n2 := encodeSizeUxHead(obj)
+
+ if uint64(n1) != n2 {
+ t.Fatalf("encoder.Size() != encodeSizeUxHead() (%d != %d)", n1, n2)
+ }
+
+ // Encode
+
+ // encoder.Serialize
+ data1 := encoder.Serialize(obj)
+
+ // Encode
+ data2, err := encodeUxHead(obj)
+ if err != nil {
+ t.Fatalf("encodeUxHead failed: %v", err)
+ }
+ if uint64(len(data2)) != n2 {
+ t.Fatal("encodeUxHead produced bytes of unexpected length")
+ }
+ if len(data1) != len(data2) {
+ t.Fatalf("len(encoder.Serialize()) != len(encodeUxHead()) (%d != %d)", len(data1), len(data2))
+ }
+
+ // EncodeToBuffer
+ data3 := make([]byte, n2+5)
+ if err := encodeUxHeadToBuffer(data3, obj); err != nil {
+ t.Fatalf("encodeUxHeadToBuffer failed: %v", err)
+ }
+
+ if !bytes.Equal(data1, data2) {
+ t.Fatal("encoder.Serialize() != encode[1]s()")
+ }
+
+ // Decode
+
+ // encoder.DeserializeRaw
+ var obj2 UxHead
+ if n, err := encoder.DeserializeRaw(data1, &obj2); err != nil {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", err)
+ } else if n != uint64(len(data1)) {
+ t.Fatalf("encoder.DeserializeRaw failed: %v", encoder.ErrRemainingBytes)
+ }
+ if !cmp.Equal(*obj, obj2, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw result wrong")
+ }
+
+ // Decode
+ var obj3 UxHead
+ if n, err := decodeUxHead(data2, &obj3); err != nil {
+ t.Fatalf("decodeUxHead failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeUxHead bytes read length should be %d, is %d", len(data2), n)
+ }
+ if !cmp.Equal(obj2, obj3, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeUxHead()")
+ }
+
+ // Decode, excess buffer
+ var obj4 UxHead
+ n, err := decodeUxHead(data3, &obj4)
+ if err != nil {
+ t.Fatalf("decodeUxHead failed: %v", err)
+ }
+
+ if hasOmitEmptyField(&obj4) && omitEmptyLen(&obj4) == 0 {
+ // 4 bytes read for the omitEmpty length, which should be zero (see the 5 bytes added above)
+ if n != n2+4 {
+ t.Fatalf("decodeUxHead bytes read length should be %d, is %d", n2+4, n)
+ }
+ } else {
+ if n != n2 {
+ t.Fatalf("decodeUxHead bytes read length should be %d, is %d", n2, n)
+ }
+ }
+ if !cmp.Equal(obj2, obj4, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeUxHead()")
+ }
+
+ // DecodeExact
+ var obj5 UxHead
+ if err := decodeUxHeadExact(data2, &obj5); err != nil {
+ t.Fatalf("decodeUxHead failed: %v", err)
+ }
+ if !cmp.Equal(obj2, obj5, cmpopts.EquateEmpty(), encodertest.IgnoreAllUnexported()) {
+ t.Fatal("encoder.DeserializeRaw() != decodeUxHead()")
+ }
+
+ // Check that the bytes read value is correct when providing an extended buffer
+ if !hasOmitEmptyField(&obj3) || omitEmptyLen(&obj3) > 0 {
+ padding := []byte{0xFF, 0xFE, 0xFD, 0xFC}
+ data4 := append(data2[:], padding...)
+ if n, err := decodeUxHead(data4, &obj3); err != nil {
+ t.Fatalf("decodeUxHead failed: %v", err)
+ } else if n != uint64(len(data2)) {
+ t.Fatalf("decodeUxHead bytes read length should be %d, is %d", len(data2), n)
+ }
+ }
+}
+
+func TestSkyencoderUxHead(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+
+ type testCase struct {
+ name string
+ obj *UxHead
+ }
+
+ cases := []testCase{
+ {
+ name: "empty object",
+ obj: newEmptyUxHeadForEncodeTest(),
+ },
+ }
+
+ nRandom := 10
+
+ for i := 0; i < nRandom; i++ {
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d", i),
+ obj: newRandomUxHeadForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents", i),
+ obj: newRandomZeroLenUxHeadForEncodeTest(t, rand),
+ })
+ cases = append(cases, testCase{
+ name: fmt.Sprintf("randomly populated object %d with zero length variable length contents set to nil", i),
+ obj: newRandomZeroLenNilUxHeadForEncodeTest(t, rand),
+ })
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ testSkyencoderUxHead(t, tc.obj)
+ })
+ }
+}
+
+func decodeUxHeadExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj UxHead
+ if _, err := decodeUxHead(buf, &obj); err == nil {
+ t.Fatal("decodeUxHead: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeUxHead: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func decodeUxHeadExactExpectError(t *testing.T, buf []byte, expectedErr error) {
+ var obj UxHead
+ if err := decodeUxHeadExact(buf, &obj); err == nil {
+ t.Fatal("decodeUxHeadExact: expected error, got nil")
+ } else if err != expectedErr {
+ t.Fatalf("decodeUxHeadExact: expected error %q, got %q", expectedErr, err)
+ }
+}
+
+func testSkyencoderUxHeadDecodeErrors(t *testing.T, k int, tag string, obj *UxHead) {
+ isEncodableField := func(f reflect.StructField) bool {
+ // Skip unexported fields
+ if f.PkgPath != "" {
+ return false
+ }
+
+ // Skip fields disabled with and enc:"- struct tag
+ tag := f.Tag.Get("enc")
+ return !strings.HasPrefix(tag, "-,") && tag != "-"
+ }
+
+ numEncodableFields := func(obj interface{}) int {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+
+ n := 0
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if !isEncodableField(f) {
+ continue
+ }
+ n++
+ }
+ return n
+ default:
+ return 0
+ }
+ }
+
+ hasOmitEmptyField := func(obj interface{}) bool {
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ t := v.Type()
+ n := v.NumField()
+ f := t.Field(n - 1)
+ tag := f.Tag.Get("enc")
+ return isEncodableField(f) && strings.Contains(tag, ",omitempty")
+ default:
+ return false
+ }
+ }
+
+ // returns the number of bytes encoded by an omitempty field on a given object
+ omitEmptyLen := func(obj interface{}) uint64 {
+ if !hasOmitEmptyField(obj) {
+ return 0
+ }
+
+ v := reflect.ValueOf(obj)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := v.NumField()
+ f := v.Field(n - 1)
+ if f.Len() == 0 {
+ return 0
+ }
+ return uint64(4 + f.Len())
+
+ default:
+ return 0
+ }
+ }
+
+ n := encodeSizeUxHead(obj)
+ buf, err := encodeUxHead(obj)
+ if err != nil {
+ t.Fatalf("encodeUxHead failed: %v", err)
+ }
+
+ // A nil buffer cannot decode, unless the object is a struct with a single omitempty field
+ if hasOmitEmptyField(obj) && numEncodableFields(obj) > 1 {
+ t.Run(fmt.Sprintf("%d %s buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeUxHeadExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow nil", k, tag), func(t *testing.T) {
+ decodeUxHeadExactExpectError(t, nil, encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Test all possible truncations of the encoded byte array, but skip
+ // a truncation that would be valid where omitempty is removed
+ skipN := n - omitEmptyLen(obj)
+ for i := uint64(0); i < n; i++ {
+ if i == skipN {
+ continue
+ }
+
+ t.Run(fmt.Sprintf("%d %s buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeUxHeadExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+
+ t.Run(fmt.Sprintf("%d %s exact buffer underflow bytes=%d", k, tag, i), func(t *testing.T) {
+ decodeUxHeadExactExpectError(t, buf[:i], encoder.ErrBufferUnderflow)
+ })
+ }
+
+ // Append 5 bytes for omit empty with a 0 length prefix, to cause an ErrRemainingBytes.
+ // If only 1 byte is appended, the decoder will try to read the 4-byte length prefix,
+ // and return an ErrBufferUnderflow instead
+ if hasOmitEmptyField(obj) {
+ buf = append(buf, []byte{0, 0, 0, 0, 0}...)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ t.Run(fmt.Sprintf("%d %s exact buffer remaining bytes", k, tag), func(t *testing.T) {
+ decodeUxHeadExactExpectError(t, buf, encoder.ErrRemainingBytes)
+ })
+}
+
+func TestSkyencoderUxHeadDecodeErrors(t *testing.T) {
+ rand := mathrand.New(mathrand.NewSource(time.Now().Unix()))
+ n := 10
+
+ for i := 0; i < n; i++ {
+ emptyObj := newEmptyUxHeadForEncodeTest()
+ fullObj := newRandomUxHeadForEncodeTest(t, rand)
+ testSkyencoderUxHeadDecodeErrors(t, i, "empty", emptyObj)
+ testSkyencoderUxHeadDecodeErrors(t, i, "full", fullObj)
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_64bit_test.go b/vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_64bit_test.go
new file mode 100644
index 0000000..8afabad
--- /dev/null
+++ b/vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_64bit_test.go
@@ -0,0 +1,40 @@
+// +build !386,!amd64p32,!arm,!armbe,!mips,!mipsle,!mips64p32,!mips64p32le,!ppc,!s390,!sparc
+
+package mathutil
+
+import (
+ "fmt"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test64BitIntToUint32(t *testing.T) {
+ // Remaining tests defined in TestIntToUint32
+ cases := []struct {
+ a int
+ b uint32
+ err error
+ }{
+ {
+ a: math.MaxUint32 + 1,
+ err: ErrIntOverflowsUint32,
+ },
+ {
+ a: math.MaxUint32,
+ b: math.MaxUint32,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprint(tc.a), func(t *testing.T) {
+ x, err := IntToUint32(tc.a)
+ if tc.err != nil {
+ require.Equal(t, tc.err, err)
+ } else {
+ require.Equal(t, tc.b, x)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/skycoin/skycoin/src/coin/math_test.go b/vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_test.go
similarity index 70%
rename from vendor/github.com/skycoin/skycoin/src/coin/math_test.go
rename to vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_test.go
index c628b4a..6d3e8f0 100644
--- a/vendor/github.com/skycoin/skycoin/src/coin/math_test.go
+++ b/vendor/github.com/skycoin/skycoin/src/util/mathutil/mathutil_test.go
@@ -1,4 +1,4 @@
-package coin
+package mathutil
import (
"fmt"
@@ -18,20 +18,20 @@ func TestAddUint64(t *testing.T) {
}
func TestAddUint32(t *testing.T) {
- n, err := addUint32(10, 11)
+ n, err := AddUint32(10, 11)
require.NoError(t, err)
require.Equal(t, uint32(21), n)
- _, err = addUint32(math.MaxUint32, 1)
+ _, err = AddUint32(math.MaxUint32, 1)
require.Error(t, err)
}
func TestMultUint64(t *testing.T) {
- n, err := multUint64(10, 11)
+ n, err := MultUint64(10, 11)
require.NoError(t, err)
require.Equal(t, uint64(110), n)
- _, err = multUint64(math.MaxUint64/2, 3)
+ _, err = MultUint64(math.MaxUint64/2, 3)
require.Error(t, err)
}
@@ -114,3 +114,40 @@ func TestInt64ToUint64(t *testing.T) {
})
}
}
+
+func TestIntToUint32(t *testing.T) {
+ cases := []struct {
+ a int
+ b uint32
+ err error
+ }{
+ {
+ a: 0,
+ b: 0,
+ },
+ {
+ a: -1,
+ err: ErrIntUnderflowsUint32,
+ },
+ {
+ a: math.MaxInt32,
+ b: math.MaxInt32,
+ },
+ {
+ a: 999,
+ b: 999,
+ },
+ // 64bit test defined in Test64BitIntToUint32
+ }
+
+ for _, tc := range cases {
+ t.Run(fmt.Sprint(tc.a), func(t *testing.T) {
+ x, err := IntToUint32(tc.a)
+ if tc.err != nil {
+ require.Equal(t, tc.err, err)
+ } else {
+ require.Equal(t, tc.b, x)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/args_test.go b/vendor/github.com/spf13/cobra/args_test.go
index d797b6f..c81b212 100644
--- a/vendor/github.com/spf13/cobra/args_test.go
+++ b/vendor/github.com/spf13/cobra/args_test.go
@@ -158,6 +158,52 @@ func TestExactArgsWithInvalidCount(t *testing.T) {
}
}
+func TestExactValidArgs(t *testing.T) {
+ c := &Command{Use: "c", Args: ExactValidArgs(3), ValidArgs: []string{"a", "b", "c"}, Run: emptyRun}
+ output, err := executeCommand(c, "a", "b", "c")
+ if output != "" {
+ t.Errorf("Unexpected output: %v", output)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+}
+
+func TestExactValidArgsWithInvalidCount(t *testing.T) {
+ c := &Command{Use: "c", Args: ExactValidArgs(2), Run: emptyRun}
+ _, err := executeCommand(c, "a", "b", "c")
+
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := "accepts 2 arg(s), received 3"
+ if got != expected {
+ t.Fatalf("Expected %q, got %q", expected, got)
+ }
+}
+
+func TestExactValidArgsWithInvalidArgs(t *testing.T) {
+ c := &Command{
+ Use: "c",
+ Args: ExactValidArgs(1),
+ ValidArgs: []string{"one", "two"},
+ Run: emptyRun,
+ }
+
+ _, err := executeCommand(c, "three")
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ got := err.Error()
+ expected := `invalid argument "three" for "c"`
+ if got != expected {
+ t.Errorf("Expected: %q, got: %q", expected, got)
+ }
+}
+
func TestRangeArgs(t *testing.T) {
c := &Command{Use: "c", Args: RangeArgs(2, 4), Run: emptyRun}
output, err := executeCommand(c, "a", "b", "c")
diff --git a/vendor/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go
index 02a4f15..eefa3de 100644
--- a/vendor/github.com/spf13/cobra/bash_completions_test.go
+++ b/vendor/github.com/spf13/cobra/bash_completions_test.go
@@ -22,6 +22,13 @@ func check(t *testing.T, found, expected string) {
}
}
+func checkNumOccurrences(t *testing.T, found, expected string, expectedOccurrences int) {
+ numOccurrences := strings.Count(found, expected)
+ if numOccurrences != expectedOccurrences {
+ t.Errorf("Expecting to contain %d occurrences of: \n %q\nGot %d:\n %q\n", expectedOccurrences, expected, numOccurrences, found)
+ }
+}
+
func checkRegex(t *testing.T, found, pattern string) {
matched, err := regexp.MatchString(pattern, found)
if err != nil {
@@ -53,7 +60,7 @@ func runShellCheck(s string) error {
}
// World worst custom function, just keep telling you to enter hello!
-const bashCompletionFunc = `__custom_func() {
+const bashCompletionFunc = `__root_custom_func() {
COMPREPLY=( "hello" )
}
`
@@ -64,7 +71,7 @@ func TestBashCompletions(t *testing.T) {
ArgAliases: []string{"pods", "nodes", "services", "replicationcontrollers", "po", "no", "svc", "rc"},
ValidArgs: []string{"pod", "node", "service", "replicationcontroller"},
BashCompletionFunction: bashCompletionFunc,
- Run: emptyRun,
+ Run: emptyRun,
}
rootCmd.Flags().IntP("introot", "i", -1, "help message for flag introot")
rootCmd.MarkFlagRequired("introot")
@@ -88,6 +95,10 @@ func TestBashCompletions(t *testing.T) {
rootCmd.Flags().String("theme", "", "theme to use (located in /themes/THEMENAME/)")
rootCmd.Flags().SetAnnotation("theme", BashCompSubdirsInDir, []string{"themes"})
+ // For two word flags check
+ rootCmd.Flags().StringP("two", "t", "", "this is two word flags")
+ rootCmd.Flags().BoolP("two-w-default", "T", false, "this is not two word flags")
+
echoCmd := &Command{
Use: "echo [string to echo]",
Aliases: []string{"say"},
@@ -150,7 +161,10 @@ func TestBashCompletions(t *testing.T) {
// check for required flags
check(t, output, `must_have_one_flag+=("--introot=")`)
check(t, output, `must_have_one_flag+=("--persistent-filename=")`)
- // check for custom completion function
+ // check for custom completion function with both qualified and unqualified name
+ checkNumOccurrences(t, output, `__custom_func`, 2) // 1. check existence, 2. invoke
+ checkNumOccurrences(t, output, `__root_custom_func`, 3) // 1. check existence, 2. invoke, 3. actual definition
+ // check for custom completion function body
check(t, output, `COMPREPLY=( "hello" )`)
// check for required nouns
check(t, output, `must_have_one_noun+=("pod")`)
@@ -173,6 +187,12 @@ func TestBashCompletions(t *testing.T) {
// check for subdirs_in_dir flags in a subcommand
checkRegex(t, output, fmt.Sprintf(`_root_echo\(\)\n{[^}]*flags_completion\+=\("__%s_handle_subdirs_in_dir_flag config"\)`, rootCmd.Name()))
+ // check two word flags
+ check(t, output, `two_word_flags+=("--two")`)
+ check(t, output, `two_word_flags+=("-t")`)
+ checkOmit(t, output, `two_word_flags+=("--two-w-default")`)
+ checkOmit(t, output, `two_word_flags+=("-T")`)
+
checkOmit(t, output, deprecatedCmd.Name())
// If available, run shellcheck against the script.
diff --git a/vendor/github.com/spf13/cobra/command_test.go b/vendor/github.com/spf13/cobra/command_test.go
index ccee031..2fa2003 100644
--- a/vendor/github.com/spf13/cobra/command_test.go
+++ b/vendor/github.com/spf13/cobra/command_test.go
@@ -1164,12 +1164,13 @@ func TestPersistentHooks(t *testing.T) {
t.Errorf("Unexpected error: %v", err)
}
- // TODO: This test fails, but should not.
- // Related to https://github.com/spf13/cobra/issues/252.
- //
- // if parentPersPreArgs != "one two" {
- // t.Errorf("Expected parentPersPreArgs %q, got %q", "one two", parentPersPreArgs)
- // }
+	// TODO: currently PersistentPreRun* defined in parent does not
+	// run if the matching child subcommand has PersistentPreRun.
+ // If the behavior changes (https://github.com/spf13/cobra/issues/252)
+ // this test must be fixed.
+ if parentPersPreArgs != "" {
+ t.Errorf("Expected blank parentPersPreArgs, got %q", parentPersPreArgs)
+ }
if parentPreArgs != "" {
t.Errorf("Expected blank parentPreArgs, got %q", parentPreArgs)
}
@@ -1179,12 +1180,13 @@ func TestPersistentHooks(t *testing.T) {
if parentPostArgs != "" {
t.Errorf("Expected blank parentPostArgs, got %q", parentPostArgs)
}
- // TODO: This test fails, but should not.
- // Related to https://github.com/spf13/cobra/issues/252.
- //
- // if parentPersPostArgs != "one two" {
- // t.Errorf("Expected parentPersPostArgs %q, got %q", "one two", parentPersPostArgs)
- // }
+	// TODO: currently PersistentPostRun* defined in parent does not
+	// run if the matching child subcommand has PersistentPostRun.
+ // If the behavior changes (https://github.com/spf13/cobra/issues/252)
+ // this test must be fixed.
+ if parentPersPostArgs != "" {
+ t.Errorf("Expected blank parentPersPostArgs, got %q", parentPersPostArgs)
+ }
if childPersPreArgs != "one two" {
t.Errorf("Expected childPersPreArgs %q, got %q", "one two", childPersPreArgs)
@@ -1379,6 +1381,46 @@ func TestSetOutput(t *testing.T) {
}
}
+func TestSetOut(t *testing.T) {
+ c := &Command{}
+ c.SetOut(nil)
+ if out := c.OutOrStdout(); out != os.Stdout {
+ t.Errorf("Expected setting output to nil to revert back to stdout")
+ }
+}
+
+func TestSetErr(t *testing.T) {
+ c := &Command{}
+ c.SetErr(nil)
+ if out := c.ErrOrStderr(); out != os.Stderr {
+ t.Errorf("Expected setting error to nil to revert back to stderr")
+ }
+}
+
+func TestSetIn(t *testing.T) {
+ c := &Command{}
+ c.SetIn(nil)
+ if out := c.InOrStdin(); out != os.Stdin {
+ t.Errorf("Expected setting input to nil to revert back to stdin")
+ }
+}
+
+func TestUsageStringRedirected(t *testing.T) {
+ c := &Command{}
+
+ c.usageFunc = func(cmd *Command) error {
+ cmd.Print("[stdout1]")
+ cmd.PrintErr("[stderr2]")
+ cmd.Print("[stdout3]")
+ return nil
+ }
+
+ expected := "[stdout1][stderr2][stdout3]"
+ if got := c.UsageString(); got != expected {
+ t.Errorf("Expected usage string to consider both stdout and stderr")
+ }
+}
+
func TestFlagErrorFunc(t *testing.T) {
c := &Command{Use: "c", Run: emptyRun}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions_test.go b/vendor/github.com/spf13/cobra/powershell_completions_test.go
new file mode 100644
index 0000000..29b609d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions_test.go
@@ -0,0 +1,122 @@
+package cobra
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestPowerShellCompletion(t *testing.T) {
+ tcs := []struct {
+ name string
+ root *Command
+ expectedExpressions []string
+ }{
+ {
+ name: "trivial",
+ root: &Command{Use: "trivialapp"},
+ expectedExpressions: []string{
+ "Register-ArgumentCompleter -Native -CommandName 'trivialapp' -ScriptBlock",
+ "$command = @(\n 'trivialapp'\n",
+ },
+ },
+ {
+ name: "tree",
+ root: func() *Command {
+ r := &Command{Use: "tree"}
+
+ sub1 := &Command{Use: "sub1"}
+ r.AddCommand(sub1)
+
+ sub11 := &Command{Use: "sub11"}
+ sub12 := &Command{Use: "sub12"}
+
+ sub1.AddCommand(sub11)
+ sub1.AddCommand(sub12)
+
+ sub2 := &Command{Use: "sub2"}
+ r.AddCommand(sub2)
+
+ sub21 := &Command{Use: "sub21"}
+ sub22 := &Command{Use: "sub22"}
+
+ sub2.AddCommand(sub21)
+ sub2.AddCommand(sub22)
+
+ return r
+ }(),
+ expectedExpressions: []string{
+ "'tree'",
+ "[CompletionResult]::new('sub1', 'sub1', [CompletionResultType]::ParameterValue, '')",
+ "[CompletionResult]::new('sub2', 'sub2', [CompletionResultType]::ParameterValue, '')",
+ "'tree;sub1'",
+ "[CompletionResult]::new('sub11', 'sub11', [CompletionResultType]::ParameterValue, '')",
+ "[CompletionResult]::new('sub12', 'sub12', [CompletionResultType]::ParameterValue, '')",
+ "'tree;sub1;sub11'",
+ "'tree;sub1;sub12'",
+ "'tree;sub2'",
+ "[CompletionResult]::new('sub21', 'sub21', [CompletionResultType]::ParameterValue, '')",
+ "[CompletionResult]::new('sub22', 'sub22', [CompletionResultType]::ParameterValue, '')",
+ "'tree;sub2;sub21'",
+ "'tree;sub2;sub22'",
+ },
+ },
+ {
+ name: "flags",
+ root: func() *Command {
+ r := &Command{Use: "flags"}
+ r.Flags().StringP("flag1", "a", "", "")
+ r.Flags().String("flag2", "", "")
+
+ sub1 := &Command{Use: "sub1"}
+ sub1.Flags().StringP("flag3", "c", "", "")
+ r.AddCommand(sub1)
+
+ return r
+ }(),
+ expectedExpressions: []string{
+ "'flags'",
+ "[CompletionResult]::new('-a', 'a', [CompletionResultType]::ParameterName, '')",
+ "[CompletionResult]::new('--flag1', 'flag1', [CompletionResultType]::ParameterName, '')",
+ "[CompletionResult]::new('--flag2', 'flag2', [CompletionResultType]::ParameterName, '')",
+ "[CompletionResult]::new('sub1', 'sub1', [CompletionResultType]::ParameterValue, '')",
+ "'flags;sub1'",
+ "[CompletionResult]::new('-c', 'c', [CompletionResultType]::ParameterName, '')",
+ "[CompletionResult]::new('--flag3', 'flag3', [CompletionResultType]::ParameterName, '')",
+ },
+ },
+ {
+ name: "usage",
+ root: func() *Command {
+ r := &Command{Use: "usage"}
+ r.Flags().String("flag", "", "this describes the usage of the 'flag' flag")
+
+ sub1 := &Command{
+ Use: "sub1",
+ Short: "short describes 'sub1'",
+ }
+ r.AddCommand(sub1)
+
+ return r
+ }(),
+ expectedExpressions: []string{
+ "[CompletionResult]::new('--flag', 'flag', [CompletionResultType]::ParameterName, 'this describes the usage of the ''flag'' flag')",
+ "[CompletionResult]::new('sub1', 'sub1', [CompletionResultType]::ParameterValue, 'short describes ''sub1''')",
+ },
+ },
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ buf := new(bytes.Buffer)
+ tc.root.GenPowerShellCompletion(buf)
+ output := buf.String()
+
+ for _, expectedExpression := range tc.expectedExpressions {
+ if !strings.Contains(output, expectedExpression) {
+ t.Errorf("Expected completion to contain %q somewhere; got %q", expectedExpression, output)
+ }
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions_test.go b/vendor/github.com/spf13/cobra/zsh_completions_test.go
index 34e6949..e53fa88 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions_test.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions_test.go
@@ -2,88 +2,474 @@ package cobra
import (
"bytes"
+ "regexp"
"strings"
"testing"
)
-func TestZshCompletion(t *testing.T) {
+func TestGenZshCompletion(t *testing.T) {
+ var debug bool
+ var option string
+
tcs := []struct {
name string
root *Command
expectedExpressions []string
+ invocationArgs []string
+ skip string
}{
{
- name: "trivial",
- root: &Command{Use: "trivialapp"},
- expectedExpressions: []string{"#compdef trivial"},
+ name: "simple command",
+ root: func() *Command {
+ r := &Command{
+ Use: "mycommand",
+ Long: "My Command long description",
+ Run: emptyRun,
+ }
+ r.Flags().BoolVar(&debug, "debug", debug, "description")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `(?s)function _mycommand {\s+_arguments \\\s+'--debug\[description\]'.*--help.*}`,
+ "#compdef _mycommand mycommand",
+ },
},
{
- name: "linear",
+ name: "flags with both long and short flags",
root: func() *Command {
- r := &Command{Use: "linear"}
-
- sub1 := &Command{Use: "sub1"}
- r.AddCommand(sub1)
-
- sub2 := &Command{Use: "sub2"}
- sub1.AddCommand(sub2)
-
- sub3 := &Command{Use: "sub3"}
- sub2.AddCommand(sub3)
+ r := &Command{
+ Use: "testcmd",
+ Long: "long description",
+ Run: emptyRun,
+ }
+ r.Flags().BoolVarP(&debug, "debug", "d", debug, "debug description")
return r
}(),
- expectedExpressions: []string{"sub1", "sub2", "sub3"},
+ expectedExpressions: []string{
+ `'\(-d --debug\)'{-d,--debug}'\[debug description\]'`,
+ },
},
{
- name: "flat",
+ name: "command with subcommands and flags with values",
root: func() *Command {
- r := &Command{Use: "flat"}
- r.AddCommand(&Command{Use: "c1"})
- r.AddCommand(&Command{Use: "c2"})
+ r := &Command{
+ Use: "rootcmd",
+ Long: "Long rootcmd description",
+ }
+ d := &Command{
+ Use: "subcmd1",
+ Short: "Subcmd1 short description",
+ Run: emptyRun,
+ }
+ e := &Command{
+ Use: "subcmd2",
+ Long: "Subcmd2 short description",
+ Run: emptyRun,
+ }
+ r.PersistentFlags().BoolVar(&debug, "debug", debug, "description")
+ d.Flags().StringVarP(&option, "option", "o", option, "option description")
+ r.AddCommand(d, e)
return r
}(),
- expectedExpressions: []string{"(c1 c2)"},
+ expectedExpressions: []string{
+ `commands=\(\n\s+"help:.*\n\s+"subcmd1:.*\n\s+"subcmd2:.*\n\s+\)`,
+ `_arguments \\\n.*'--debug\[description]'`,
+ `_arguments -C \\\n.*'--debug\[description]'`,
+ `function _rootcmd_subcmd1 {`,
+ `function _rootcmd_subcmd1 {`,
+ `_arguments \\\n.*'\(-o --option\)'{-o,--option}'\[option description]:' \\\n`,
+ },
},
{
- name: "tree",
+ name: "filename completion with and without globs",
root: func() *Command {
- r := &Command{Use: "tree"}
-
- sub1 := &Command{Use: "sub1"}
- r.AddCommand(sub1)
-
- sub11 := &Command{Use: "sub11"}
- sub12 := &Command{Use: "sub12"}
-
- sub1.AddCommand(sub11)
- sub1.AddCommand(sub12)
+ var file string
+ r := &Command{
+ Use: "mycmd",
+ Short: "my command short description",
+ Run: emptyRun,
+ }
+ r.Flags().StringVarP(&file, "config", "c", file, "config file")
+ r.MarkFlagFilename("config")
+ r.Flags().String("output", "", "output file")
+ r.MarkFlagFilename("output", "*.log", "*.txt")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `\n +'\(-c --config\)'{-c,--config}'\[config file]:filename:_files'`,
+ `:_files -g "\*.log" -g "\*.txt"`,
+ },
+ },
+ {
+ name: "repeated variables both with and without value",
+ root: func() *Command {
+ r := genTestCommand("mycmd", true)
+ _ = r.Flags().BoolSliceP("debug", "d", []bool{}, "debug usage")
+ _ = r.Flags().StringArray("option", []string{}, "options")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `'\*--option\[options]`,
+ `'\(\*-d \*--debug\)'{\\\*-d,\\\*--debug}`,
+ },
+ },
+ {
+ name: "generated flags --help and --version should be created even when not executing root cmd",
+ root: func() *Command {
+ r := &Command{
+ Use: "mycmd",
+ Short: "mycmd short description",
+ Version: "myversion",
+ }
+ s := genTestCommand("sub1", true)
+ r.AddCommand(s)
+ return s
+ }(),
+ expectedExpressions: []string{
+ "--version",
+ "--help",
+ },
+ invocationArgs: []string{
+ "sub1",
+ },
+ skip: "--version and --help are currently not generated when not running on root command",
+ },
+ {
+ name: "zsh generation should run on root command",
+ root: func() *Command {
+ r := genTestCommand("root", false)
+ s := genTestCommand("sub1", true)
+ r.AddCommand(s)
+ return s
+ }(),
+ expectedExpressions: []string{
+ "function _root {",
+ },
+ },
+ {
+ name: "flag description with single quote (') shouldn't break quotes in completion file",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.Flags().Bool("private", false, "Don't show public info")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `--private\[Don'\\''t show public info]`,
+ },
+ },
+ {
+ name: "argument completion for file with and without patterns",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.MarkZshCompPositionalArgumentFile(1, "*.log")
+ r.MarkZshCompPositionalArgumentFile(2)
+ return r
+ }(),
+ expectedExpressions: []string{
+ `'1: :_files -g "\*.log"' \\\n\s+'2: :_files`,
+ },
+ },
+ {
+ name: "argument zsh completion for words",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.MarkZshCompPositionalArgumentWords(1, "word1", "word2")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `'1: :\("word1" "word2"\)`,
+ },
+ },
+ {
+ name: "argument completion for words with spaces",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.MarkZshCompPositionalArgumentWords(1, "single", "multiple words")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `'1: :\("single" "multiple words"\)'`,
+ },
+ },
+ {
+ name: "argument completion when command has ValidArgs and no annotation for argument completion",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.ValidArgs = []string{"word1", "word2"}
+ return r
+ }(),
+ expectedExpressions: []string{
+ `'1: :\("word1" "word2"\)'`,
+ },
+ },
+ {
+ name: "argument completion when command has ValidArgs and no annotation for argument at argPosition 1",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.ValidArgs = []string{"word1", "word2"}
+ r.MarkZshCompPositionalArgumentFile(2)
+ return r
+ }(),
+ expectedExpressions: []string{
+ `'1: :\("word1" "word2"\)' \\`,
+ },
+ },
+ {
+ name: "directory completion for flag",
+ root: func() *Command {
+ r := genTestCommand("root", true)
+ r.Flags().String("test", "", "test")
+ r.PersistentFlags().String("ptest", "", "ptest")
+ r.MarkFlagDirname("test")
+ r.MarkPersistentFlagDirname("ptest")
+ return r
+ }(),
+ expectedExpressions: []string{
+ `--test\[test]:filename:_files -g "-\(/\)"`,
+ `--ptest\[ptest]:filename:_files -g "-\(/\)"`,
+ },
+ },
+ }
- sub2 := &Command{Use: "sub2"}
- r.AddCommand(sub2)
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.skip != "" {
+ t.Skip(tc.skip)
+ }
+ tc.root.Root().SetArgs(tc.invocationArgs)
+ tc.root.Execute()
+ buf := new(bytes.Buffer)
+ if err := tc.root.GenZshCompletion(buf); err != nil {
+ t.Error(err)
+ }
+ output := buf.Bytes()
- sub21 := &Command{Use: "sub21"}
- sub22 := &Command{Use: "sub22"}
+ for _, expr := range tc.expectedExpressions {
+ rgx, err := regexp.Compile(expr)
+ if err != nil {
+ t.Errorf("error compiling expression (%s): %v", expr, err)
+ }
+ if !rgx.Match(output) {
+ t.Errorf("expected completion (%s) to match '%s'", buf.String(), expr)
+ }
+ }
+ })
+ }
+}
- sub2.AddCommand(sub21)
- sub2.AddCommand(sub22)
+func TestGenZshCompletionHidden(t *testing.T) {
+ tcs := []struct {
+ name string
+ root *Command
+ expectedExpressions []string
+ }{
+ {
+ name: "hidden commands",
+ root: func() *Command {
+ r := &Command{
+ Use: "main",
+ Short: "main short description",
+ }
+ s1 := &Command{
+ Use: "sub1",
+ Hidden: true,
+ Run: emptyRun,
+ }
+ s2 := &Command{
+ Use: "sub2",
+ Short: "short sub2 description",
+ Run: emptyRun,
+ }
+ r.AddCommand(s1, s2)
return r
}(),
- expectedExpressions: []string{"(sub11 sub12)", "(sub21 sub22)"},
+ expectedExpressions: []string{
+ "sub1",
+ },
+ },
+ {
+ name: "hidden flags",
+ root: func() *Command {
+ var hidden string
+ r := &Command{
+ Use: "root",
+ Short: "root short description",
+ Run: emptyRun,
+ }
+ r.Flags().StringVarP(&hidden, "hidden", "H", hidden, "hidden usage")
+ if err := r.Flags().MarkHidden("hidden"); err != nil {
+ t.Errorf("Error setting flag hidden: %v\n", err)
+ }
+ return r
+ }(),
+ expectedExpressions: []string{
+ "--hidden",
+ },
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
+ tc.root.Execute()
buf := new(bytes.Buffer)
- tc.root.GenZshCompletion(buf)
+ if err := tc.root.GenZshCompletion(buf); err != nil {
+ t.Error(err)
+ }
output := buf.String()
- for _, expectedExpression := range tc.expectedExpressions {
- if !strings.Contains(output, expectedExpression) {
- t.Errorf("Expected completion to contain %q somewhere; got %q", expectedExpression, output)
+ for _, expr := range tc.expectedExpressions {
+ if strings.Contains(output, expr) {
+ t.Errorf("Expected completion (%s) not to contain '%s' but it does", output, expr)
}
}
})
}
}
+
+func TestMarkZshCompPositionalArgumentFile(t *testing.T) {
+ t.Run("Doesn't allow overwriting existing positional argument", func(t *testing.T) {
+ c := &Command{}
+ if err := c.MarkZshCompPositionalArgumentFile(1, "*.log"); err != nil {
+ t.Errorf("Received error when we shouldn't have: %v\n", err)
+ }
+ if err := c.MarkZshCompPositionalArgumentFile(1); err == nil {
+ t.Error("Didn't receive an error when trying to overwrite argument position")
+ }
+ })
+
+ t.Run("Refuses to accept argPosition less then 1", func(t *testing.T) {
+ c := &Command{}
+ err := c.MarkZshCompPositionalArgumentFile(0, "*")
+ if err == nil {
+ t.Fatal("Error was not thrown when indicating argument position 0")
+ }
+ if !strings.Contains(err.Error(), "position") {
+ t.Errorf("expected error message '%s' to contain 'position'", err.Error())
+ }
+ })
+}
+
+func TestMarkZshCompPositionalArgumentWords(t *testing.T) {
+ t.Run("Doesn't allow overwriting existing positional argument", func(t *testing.T) {
+ c := &Command{}
+ if err := c.MarkZshCompPositionalArgumentFile(1, "*.log"); err != nil {
+ t.Errorf("Received error when we shouldn't have: %v\n", err)
+ }
+ if err := c.MarkZshCompPositionalArgumentWords(1, "hello"); err == nil {
+ t.Error("Didn't receive an error when trying to overwrite argument position")
+ }
+ })
+
+ t.Run("Doesn't allow calling without words", func(t *testing.T) {
+ c := &Command{}
+ if err := c.MarkZshCompPositionalArgumentWords(0); err == nil {
+ t.Error("Should not allow saving empty word list for annotation")
+ }
+ })
+
+ t.Run("Refuses to accept argPosition less then 1", func(t *testing.T) {
+ c := &Command{}
+ err := c.MarkZshCompPositionalArgumentWords(0, "word")
+ if err == nil {
+ t.Fatal("Should not allow setting argument position less then 1")
+ }
+ if !strings.Contains(err.Error(), "position") {
+ t.Errorf("Expected error '%s' to contain 'position' but didn't", err.Error())
+ }
+ })
+}
+
+func BenchmarkMediumSizeConstruct(b *testing.B) {
+ root := constructLargeCommandHierarchy()
+ // if err := root.GenZshCompletionFile("_mycmd"); err != nil {
+ // b.Error(err)
+ // }
+
+ for i := 0; i < b.N; i++ {
+ buf := new(bytes.Buffer)
+ err := root.GenZshCompletion(buf)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func TestExtractFlags(t *testing.T) {
+ var debug, cmdc, cmdd bool
+ c := &Command{
+ Use: "cmdC",
+ Long: "Command C",
+ }
+ c.PersistentFlags().BoolVarP(&debug, "debug", "d", debug, "debug mode")
+ c.Flags().BoolVar(&cmdc, "cmd-c", cmdc, "Command C")
+ d := &Command{
+ Use: "CmdD",
+ Long: "Command D",
+ }
+ d.Flags().BoolVar(&cmdd, "cmd-d", cmdd, "Command D")
+ c.AddCommand(d)
+
+ resC := zshCompExtractFlag(c)
+ resD := zshCompExtractFlag(d)
+
+ if len(resC) != 2 {
+ t.Errorf("expected Command C to return 2 flags, got %d", len(resC))
+ }
+ if len(resD) != 2 {
+ t.Errorf("expected Command D to return 2 flags, got %d", len(resD))
+ }
+}
+
+func constructLargeCommandHierarchy() *Command {
+ var config, st1, st2 string
+ var long, debug bool
+ var in1, in2 int
+ var verbose []bool
+
+ r := genTestCommand("mycmd", false)
+ r.PersistentFlags().StringVarP(&config, "config", "c", config, "config usage")
+ if err := r.MarkPersistentFlagFilename("config", "*"); err != nil {
+ panic(err)
+ }
+ s1 := genTestCommand("sub1", true)
+ s1.Flags().BoolVar(&long, "long", long, "long description")
+ s1.Flags().BoolSliceVar(&verbose, "verbose", verbose, "verbose description")
+ s1.Flags().StringArray("option", []string{}, "various options")
+ s2 := genTestCommand("sub2", true)
+ s2.PersistentFlags().BoolVar(&debug, "debug", debug, "debug description")
+ s3 := genTestCommand("sub3", true)
+ s3.Hidden = true
+ s1_1 := genTestCommand("sub1sub1", true)
+ s1_1.Flags().StringVar(&st1, "st1", st1, "st1 description")
+ s1_1.Flags().StringVar(&st2, "st2", st2, "st2 description")
+ s1_2 := genTestCommand("sub1sub2", true)
+ s1_3 := genTestCommand("sub1sub3", true)
+ s1_3.Flags().IntVar(&in1, "int1", in1, "int1 description")
+ s1_3.Flags().IntVar(&in2, "int2", in2, "int2 description")
+ s1_3.Flags().StringArrayP("option", "O", []string{}, "more options")
+ s2_1 := genTestCommand("sub2sub1", true)
+ s2_2 := genTestCommand("sub2sub2", true)
+ s2_3 := genTestCommand("sub2sub3", true)
+ s2_4 := genTestCommand("sub2sub4", true)
+ s2_5 := genTestCommand("sub2sub5", true)
+
+ s1.AddCommand(s1_1, s1_2, s1_3)
+ s2.AddCommand(s2_1, s2_2, s2_3, s2_4, s2_5)
+ r.AddCommand(s1, s2, s3)
+ r.Execute()
+ return r
+}
+
+func genTestCommand(name string, withRun bool) *Command {
+ r := &Command{
+ Use: name,
+ Short: name + " short description",
+ Long: "Long description for " + name,
+ }
+ if withRun {
+ r.Run = emptyRun
+ }
+
+ return r
+}
diff --git a/vendor/github.com/spf13/pflag/bytes_test.go b/vendor/github.com/spf13/pflag/bytes_test.go
index cc4a769..5251f34 100644
--- a/vendor/github.com/spf13/pflag/bytes_test.go
+++ b/vendor/github.com/spf13/pflag/bytes_test.go
@@ -1,6 +1,7 @@
package pflag
import (
+ "encoding/base64"
"fmt"
"os"
"testing"
@@ -61,7 +62,7 @@ func TestBytesHex(t *testing.T) {
} else if tc.success {
bytesHex, err := f.GetBytesHex("bytes")
if err != nil {
- t.Errorf("Got error trying to fetch the IP flag: %v", err)
+ t.Errorf("Got error trying to fetch the 'bytes' flag: %v", err)
}
if fmt.Sprintf("%X", bytesHex) != tc.expected {
t.Errorf("expected %q, got '%X'", tc.expected, bytesHex)
@@ -70,3 +71,64 @@ func TestBytesHex(t *testing.T) {
}
}
}
+
+func setUpBytesBase64(bytesBase64 *[]byte) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.BytesBase64Var(bytesBase64, "bytes", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "Some bytes in Base64")
+ f.BytesBase64VarP(bytesBase64, "bytes2", "B", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, "Some bytes in Base64")
+ return f
+}
+
+func TestBytesBase64(t *testing.T) {
+ testCases := []struct {
+ input string
+ success bool
+ expected string
+ }{
+ // Positive cases
+ {"", true, ""}, // Is empty string OK ?
+ {"AQ==", true, "AQ=="},
+
+ // Negative cases
+ {"AQ", false, ""}, // Padding removed
+ {"ï", false, ""}, // non-base64 characters
+ }
+
+ devnull, _ := os.Open(os.DevNull)
+ os.Stderr = devnull
+
+ for i := range testCases {
+ var bytesBase64 []byte
+ f := setUpBytesBase64(&bytesBase64)
+
+ tc := &testCases[i]
+
+ // --bytes
+ args := []string{
+ fmt.Sprintf("--bytes=%s", tc.input),
+ fmt.Sprintf("-B %s", tc.input),
+ fmt.Sprintf("--bytes2=%s", tc.input),
+ }
+
+ for _, arg := range args {
+ err := f.Parse([]string{arg})
+
+ if err != nil && tc.success == true {
+ t.Errorf("expected success, got %q", err)
+ continue
+ } else if err == nil && tc.success == false {
+ // bytesBase64, err := f.GetBytesBase64("bytes")
+ t.Errorf("expected failure while processing %q", tc.input)
+ continue
+ } else if tc.success {
+ bytesBase64, err := f.GetBytesBase64("bytes")
+ if err != nil {
+ t.Errorf("Got error trying to fetch the 'bytes' flag: %v", err)
+ }
+ if base64.StdEncoding.EncodeToString(bytesBase64) != tc.expected {
+ t.Errorf("expected %q, got '%X'", tc.expected, bytesBase64)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go
index f600f0a..7d02dbc 100644
--- a/vendor/github.com/spf13/pflag/flag_test.go
+++ b/vendor/github.com/spf13/pflag/flag_test.go
@@ -431,6 +431,11 @@ func testParseWithUnknownFlags(f *FlagSet, t *testing.T) {
"--unknown8=unknown8value",
"--boole",
"--unknown6",
+ "",
+ "-uuuuu",
+ "",
+ "--unknown10",
+ "--unknown11",
}
want := []string{
"boola", "true",
diff --git a/vendor/github.com/spf13/pflag/string_to_int_test.go b/vendor/github.com/spf13/pflag/string_to_int_test.go
new file mode 100644
index 0000000..b60bbaf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int_test.go
@@ -0,0 +1,156 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func setUpS2IFlagSet(s2ip *map[string]int) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringToIntVar(s2ip, "s2i", map[string]int{}, "Command separated ls2it!")
+ return f
+}
+
+func setUpS2IFlagSetWithDefault(s2ip *map[string]int) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringToIntVar(s2ip, "s2i", map[string]int{"a": 1, "b": 2}, "Command separated ls2it!")
+ return f
+}
+
+func createS2IFlag(vals map[string]int) string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range vals {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return buf.String()
+}
+
+func TestEmptyS2I(t *testing.T) {
+ var s2i map[string]int
+ f := setUpS2IFlagSet(&s2i)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getS2I, err := f.GetStringToInt("s2i")
+ if err != nil {
+ t.Fatal("got an error from GetStringToInt():", err)
+ }
+ if len(getS2I) != 0 {
+ t.Fatalf("got s2i %v with len=%d but expected length=0", getS2I, len(getS2I))
+ }
+}
+
+func TestS2I(t *testing.T) {
+ var s2i map[string]int
+ f := setUpS2IFlagSet(&s2i)
+
+ vals := map[string]int{"a": 1, "b": 2, "d": 4, "c": 3}
+ arg := fmt.Sprintf("--s2i=%s", createS2IFlag(vals))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for k, v := range s2i {
+ if vals[k] != v {
+ t.Fatalf("expected s2i[%s] to be %d but got: %d", k, vals[k], v)
+ }
+ }
+ getS2I, err := f.GetStringToInt("s2i")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for k, v := range getS2I {
+ if vals[k] != v {
+ t.Fatalf("expected s2i[%s] to be %d but got: %d from GetStringToInt", k, vals[k], v)
+ }
+ }
+}
+
+func TestS2IDefault(t *testing.T) {
+ var s2i map[string]int
+ f := setUpS2IFlagSetWithDefault(&s2i)
+
+ vals := map[string]int{"a": 1, "b": 2}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for k, v := range s2i {
+ if vals[k] != v {
+ t.Fatalf("expected s2i[%s] to be %d but got: %d", k, vals[k], v)
+ }
+ }
+
+ getS2I, err := f.GetStringToInt("s2i")
+ if err != nil {
+ t.Fatal("got an error from GetStringToInt():", err)
+ }
+ for k, v := range getS2I {
+ if vals[k] != v {
+ t.Fatalf("expected s2i[%s] to be %d from GetStringToInt but got: %d", k, vals[k], v)
+ }
+ }
+}
+
+func TestS2IWithDefault(t *testing.T) {
+ var s2i map[string]int
+ f := setUpS2IFlagSetWithDefault(&s2i)
+
+ vals := map[string]int{"a": 1, "b": 2}
+ arg := fmt.Sprintf("--s2i=%s", createS2IFlag(vals))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for k, v := range s2i {
+ if vals[k] != v {
+ t.Fatalf("expected s2i[%s] to be %d but got: %d", k, vals[k], v)
+ }
+ }
+
+ getS2I, err := f.GetStringToInt("s2i")
+ if err != nil {
+ t.Fatal("got an error from GetStringToInt():", err)
+ }
+ for k, v := range getS2I {
+ if vals[k] != v {
+ t.Fatalf("expected s2i[%s] to be %d from GetStringToInt but got: %d", k, vals[k], v)
+ }
+ }
+}
+
+func TestS2ICalledTwice(t *testing.T) {
+ var s2i map[string]int
+ f := setUpS2IFlagSet(&s2i)
+
+ in := []string{"a=1,b=2", "b=3"}
+ expected := map[string]int{"a": 1, "b": 3}
+ argfmt := "--s2i=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range s2i {
+ if expected[i] != v {
+ t.Fatalf("expected s2i[%s] to be %d but got: %d", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_string_test.go b/vendor/github.com/spf13/pflag/string_to_string_test.go
new file mode 100644
index 0000000..0777f03
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string_test.go
@@ -0,0 +1,162 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func setUpS2SFlagSet(s2sp *map[string]string) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringToStringVar(s2sp, "s2s", map[string]string{}, "Command separated ls2st!")
+ return f
+}
+
+func setUpS2SFlagSetWithDefault(s2sp *map[string]string) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.StringToStringVar(s2sp, "s2s", map[string]string{"da": "1", "db": "2", "de": "5,6"}, "Command separated ls2st!")
+ return f
+}
+
+func createS2SFlag(vals map[string]string) string {
+ records := make([]string, 0, len(vals)>>1)
+ for k, v := range vals {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return strings.TrimSpace(buf.String())
+}
+
+func TestEmptyS2S(t *testing.T) {
+ var s2s map[string]string
+ f := setUpS2SFlagSet(&s2s)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getS2S, err := f.GetStringToString("s2s")
+ if err != nil {
+ t.Fatal("got an error from GetStringToString():", err)
+ }
+ if len(getS2S) != 0 {
+ t.Fatalf("got s2s %v with len=%d but expected length=0", getS2S, len(getS2S))
+ }
+}
+
+func TestS2S(t *testing.T) {
+ var s2s map[string]string
+ f := setUpS2SFlagSet(&s2s)
+
+ vals := map[string]string{"a": "1", "b": "2", "d": "4", "c": "3", "e": "5,6"}
+ arg := fmt.Sprintf("--s2s=%s", createS2SFlag(vals))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for k, v := range s2s {
+ if vals[k] != v {
+ t.Fatalf("expected s2s[%s] to be %s but got: %s", k, vals[k], v)
+ }
+ }
+ getS2S, err := f.GetStringToString("s2s")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for k, v := range getS2S {
+ if vals[k] != v {
+ t.Fatalf("expected s2s[%s] to be %s but got: %s from GetStringToString", k, vals[k], v)
+ }
+ }
+}
+
+func TestS2SDefault(t *testing.T) {
+ var s2s map[string]string
+ f := setUpS2SFlagSetWithDefault(&s2s)
+
+ vals := map[string]string{"da": "1", "db": "2", "de": "5,6"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for k, v := range s2s {
+ if vals[k] != v {
+ t.Fatalf("expected s2s[%s] to be %s but got: %s", k, vals[k], v)
+ }
+ }
+
+ getS2S, err := f.GetStringToString("s2s")
+ if err != nil {
+ t.Fatal("got an error from GetStringToString():", err)
+ }
+ for k, v := range getS2S {
+ if vals[k] != v {
+ t.Fatalf("expected s2s[%s] to be %s from GetStringToString but got: %s", k, vals[k], v)
+ }
+ }
+}
+
+func TestS2SWithDefault(t *testing.T) {
+ var s2s map[string]string
+ f := setUpS2SFlagSetWithDefault(&s2s)
+
+ vals := map[string]string{"a": "1", "b": "2", "e": "5,6"}
+ arg := fmt.Sprintf("--s2s=%s", createS2SFlag(vals))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for k, v := range s2s {
+ if vals[k] != v {
+ t.Fatalf("expected s2s[%s] to be %s but got: %s", k, vals[k], v)
+ }
+ }
+
+ getS2S, err := f.GetStringToString("s2s")
+ if err != nil {
+ t.Fatal("got an error from GetStringToString():", err)
+ }
+ for k, v := range getS2S {
+ if vals[k] != v {
+ t.Fatalf("expected s2s[%s] to be %s from GetStringToString but got: %s", k, vals[k], v)
+ }
+ }
+}
+
+func TestS2SCalledTwice(t *testing.T) {
+ var s2s map[string]string
+ f := setUpS2SFlagSet(&s2s)
+
+ in := []string{"a=1,b=2", "b=3", `"e=5,6"`, `f=7,8`}
+ expected := map[string]string{"a": "1", "b": "3", "e": "5,6", "f": "7,8"}
+ argfmt := "--s2s=%s"
+ arg0 := fmt.Sprintf(argfmt, in[0])
+ arg1 := fmt.Sprintf(argfmt, in[1])
+ arg2 := fmt.Sprintf(argfmt, in[2])
+ arg3 := fmt.Sprintf(argfmt, in[3])
+ err := f.Parse([]string{arg0, arg1, arg2, arg3})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if len(s2s) != len(expected) {
+ t.Fatalf("expected %d flags; got %d flags", len(expected), len(s2s))
+ }
+ for i, v := range s2s {
+ if expected[i] != v {
+ t.Fatalf("expected s2s[%s] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
index d9b77c1..4e7a0c6 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris
package terminal
@@ -91,6 +91,12 @@ var keyPressTests = []struct {
{
in: "\x1b[B\r", // down
},
+ {
+ in: "\016\r", // ^P
+ },
+ {
+ in: "\014\r", // ^N
+ },
{
in: "line\x1b[A\x1b[B\r", // up then down
line: "line",
@@ -231,6 +237,49 @@ func TestKeyPresses(t *testing.T) {
}
}
+var renderTests = []struct {
+ in string
+ received string
+ err error
+}{
+ {
+ // Cursor move after keyHome (left 4) then enter (right 4, newline)
+ in: "abcd\x1b[H\r",
+ received: "> abcd\x1b[4D\x1b[4C\r\n",
+ },
+ {
+ // Write, home, prepend, enter. Prepends rewrites the line.
+ in: "cdef\x1b[Hab\r",
+ received: "> cdef" + // Initial input
+ "\x1b[4Da" + // Move cursor back, insert first char
+ "cdef" + // Copy over original string
+ "\x1b[4Dbcdef" + // Repeat for second char with copy
+ "\x1b[4D" + // Put cursor back in position to insert again
+ "\x1b[4C\r\n", // Put cursor at the end of the line and newline.
+ },
+}
+
+func TestRender(t *testing.T) {
+ for i, test := range renderTests {
+ for j := 1; j < len(test.in); j++ {
+ c := &MockTerminal{
+ toSend: []byte(test.in),
+ bytesPerRead: j,
+ }
+ ss := NewTerminal(c, "> ")
+ _, err := ss.ReadLine()
+ if err != test.err {
+ t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err)
+ break
+ }
+ if test.received != string(c.received) {
+ t.Errorf("Results rendered from test %d (%d bytes per read) was '%s', expected '%s'", i, j, c.received, test.received)
+ break
+ }
+ }
+ }
+}
+
func TestPasswordNotSaved(t *testing.T) {
c := &MockTerminal{
toSend: []byte("password\r\x1b[A\r"),
diff --git a/vendor/golang.org/x/sys/unix/darwin_test.go b/vendor/golang.org/x/sys/unix/darwin_test.go
new file mode 100644
index 0000000..29af36f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/darwin_test.go
@@ -0,0 +1,210 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin,go1.12,amd64 darwin,go1.12,386
+
+package unix
+
+import (
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+type darwinTest struct {
+ name string
+ f func()
+}
+
+// TODO(khr): decide whether to keep this test enabled permanently or
+// only temporarily.
+func TestDarwinLoader(t *testing.T) {
+ // Make sure the Darwin dynamic loader can actually resolve
+ // all the system calls into libSystem.dylib. Unfortunately
+ // there is no easy way to test this at compile time. So we
+ // implement a crazy hack here, calling into the syscall
+ // function with all its arguments set to junk, and see what
+ // error we get. We are happy with any error (or none) except
+ // an error from the dynamic loader.
+ //
+ // We have to run each test in a separate subprocess for fault isolation.
+ //
+ // Hopefully the junk args won't accidentally ask the system to do "rm -fr /".
+ //
+ // In an ideal world each syscall would have its own test, so this test
+ // would be unnecessary. Unfortunately, we do not live in that world.
+ for _, test := range darwinTests {
+ // Call the test binary recursively, giving it a magic argument
+ // (see init below) and the name of the test to run.
+ cmd := exec.Command(os.Args[0], "testDarwinLoader", test.name)
+
+ // Run subprocess, collect results. Note that we expect the subprocess
+ // to fail somehow, so the error is irrelevant.
+ out, _ := cmd.CombinedOutput()
+
+ if strings.Contains(string(out), "dyld: Symbol not found:") {
+ t.Errorf("can't resolve %s in libSystem.dylib", test.name)
+ }
+ if !strings.Contains(string(out), "success") {
+ // Not really an error. Might be a syscall that never returns,
+ // like exit, or one that segfaults, like gettimeofday.
+ t.Logf("test never finished: %s: %s", test.name, string(out))
+ }
+ }
+}
+
+func init() {
+ // The test binary execs itself with the "testDarwinLoader" argument.
+ // Run the test specified by os.Args[2], then panic.
+ if len(os.Args) >= 3 && os.Args[1] == "testDarwinLoader" {
+ for _, test := range darwinTests {
+ if test.name == os.Args[2] {
+ test.f()
+ }
+ }
+ // Panic with a "success" label, so the parent process can check it.
+ panic("success")
+ }
+}
+
+// All the _trampoline functions in zsyscall_darwin_$ARCH.s
+var darwinTests = [...]darwinTest{
+ {"getgroups", libc_getgroups_trampoline},
+ {"setgroups", libc_setgroups_trampoline},
+ {"wait4", libc_wait4_trampoline},
+ {"accept", libc_accept_trampoline},
+ {"bind", libc_bind_trampoline},
+ {"connect", libc_connect_trampoline},
+ {"socket", libc_socket_trampoline},
+ {"getsockopt", libc_getsockopt_trampoline},
+ {"setsockopt", libc_setsockopt_trampoline},
+ {"getpeername", libc_getpeername_trampoline},
+ {"getsockname", libc_getsockname_trampoline},
+ {"shutdown", libc_shutdown_trampoline},
+ {"socketpair", libc_socketpair_trampoline},
+ {"recvfrom", libc_recvfrom_trampoline},
+ {"sendto", libc_sendto_trampoline},
+ {"recvmsg", libc_recvmsg_trampoline},
+ {"sendmsg", libc_sendmsg_trampoline},
+ {"kevent", libc_kevent_trampoline},
+ {"__sysctl", libc___sysctl_trampoline},
+ {"utimes", libc_utimes_trampoline},
+ {"futimes", libc_futimes_trampoline},
+ {"fcntl", libc_fcntl_trampoline},
+ {"poll", libc_poll_trampoline},
+ {"madvise", libc_madvise_trampoline},
+ {"mlock", libc_mlock_trampoline},
+ {"mlockall", libc_mlockall_trampoline},
+ {"mprotect", libc_mprotect_trampoline},
+ {"msync", libc_msync_trampoline},
+ {"munlock", libc_munlock_trampoline},
+ {"munlockall", libc_munlockall_trampoline},
+ {"ptrace", libc_ptrace_trampoline},
+ {"pipe", libc_pipe_trampoline},
+ {"getxattr", libc_getxattr_trampoline},
+ {"fgetxattr", libc_fgetxattr_trampoline},
+ {"setxattr", libc_setxattr_trampoline},
+ {"fsetxattr", libc_fsetxattr_trampoline},
+ {"removexattr", libc_removexattr_trampoline},
+ {"fremovexattr", libc_fremovexattr_trampoline},
+ {"listxattr", libc_listxattr_trampoline},
+ {"flistxattr", libc_flistxattr_trampoline},
+ {"kill", libc_kill_trampoline},
+ {"ioctl", libc_ioctl_trampoline},
+ {"access", libc_access_trampoline},
+ {"adjtime", libc_adjtime_trampoline},
+ {"chdir", libc_chdir_trampoline},
+ {"chflags", libc_chflags_trampoline},
+ {"chmod", libc_chmod_trampoline},
+ {"chown", libc_chown_trampoline},
+ {"chroot", libc_chroot_trampoline},
+ {"close", libc_close_trampoline},
+ {"dup", libc_dup_trampoline},
+ {"dup2", libc_dup2_trampoline},
+ {"exchangedata", libc_exchangedata_trampoline},
+ {"exit", libc_exit_trampoline},
+ {"faccessat", libc_faccessat_trampoline},
+ {"fchdir", libc_fchdir_trampoline},
+ {"fchflags", libc_fchflags_trampoline},
+ {"fchmod", libc_fchmod_trampoline},
+ {"fchmodat", libc_fchmodat_trampoline},
+ {"fchown", libc_fchown_trampoline},
+ {"fchownat", libc_fchownat_trampoline},
+ {"flock", libc_flock_trampoline},
+ {"fpathconf", libc_fpathconf_trampoline},
+ {"fstat64", libc_fstat64_trampoline},
+ {"fstatat64", libc_fstatat64_trampoline},
+ {"fstatfs64", libc_fstatfs64_trampoline},
+ {"fsync", libc_fsync_trampoline},
+ {"ftruncate", libc_ftruncate_trampoline},
+ {"__getdirentries64", libc___getdirentries64_trampoline},
+ {"getdtablesize", libc_getdtablesize_trampoline},
+ {"getegid", libc_getegid_trampoline},
+ {"geteuid", libc_geteuid_trampoline},
+ {"getgid", libc_getgid_trampoline},
+ {"getpgid", libc_getpgid_trampoline},
+ {"getpgrp", libc_getpgrp_trampoline},
+ {"getpid", libc_getpid_trampoline},
+ {"getppid", libc_getppid_trampoline},
+ {"getpriority", libc_getpriority_trampoline},
+ {"getrlimit", libc_getrlimit_trampoline},
+ {"getrusage", libc_getrusage_trampoline},
+ {"getsid", libc_getsid_trampoline},
+ {"getuid", libc_getuid_trampoline},
+ {"issetugid", libc_issetugid_trampoline},
+ {"kqueue", libc_kqueue_trampoline},
+ {"lchown", libc_lchown_trampoline},
+ {"link", libc_link_trampoline},
+ {"linkat", libc_linkat_trampoline},
+ {"listen", libc_listen_trampoline},
+ {"lstat64", libc_lstat64_trampoline},
+ {"mkdir", libc_mkdir_trampoline},
+ {"mkdirat", libc_mkdirat_trampoline},
+ {"mkfifo", libc_mkfifo_trampoline},
+ {"mknod", libc_mknod_trampoline},
+ {"open", libc_open_trampoline},
+ {"openat", libc_openat_trampoline},
+ {"pathconf", libc_pathconf_trampoline},
+ {"pread", libc_pread_trampoline},
+ {"pwrite", libc_pwrite_trampoline},
+ {"read", libc_read_trampoline},
+ {"readlink", libc_readlink_trampoline},
+ {"readlinkat", libc_readlinkat_trampoline},
+ {"rename", libc_rename_trampoline},
+ {"renameat", libc_renameat_trampoline},
+ {"revoke", libc_revoke_trampoline},
+ {"rmdir", libc_rmdir_trampoline},
+ {"lseek", libc_lseek_trampoline},
+ {"select", libc_select_trampoline},
+ {"setegid", libc_setegid_trampoline},
+ {"seteuid", libc_seteuid_trampoline},
+ {"setgid", libc_setgid_trampoline},
+ {"setlogin", libc_setlogin_trampoline},
+ {"setpgid", libc_setpgid_trampoline},
+ {"setpriority", libc_setpriority_trampoline},
+ {"setprivexec", libc_setprivexec_trampoline},
+ {"setregid", libc_setregid_trampoline},
+ {"setreuid", libc_setreuid_trampoline},
+ {"setrlimit", libc_setrlimit_trampoline},
+ {"setsid", libc_setsid_trampoline},
+ {"settimeofday", libc_settimeofday_trampoline},
+ {"setuid", libc_setuid_trampoline},
+ {"stat64", libc_stat64_trampoline},
+ {"statfs64", libc_statfs64_trampoline},
+ {"symlink", libc_symlink_trampoline},
+ {"symlinkat", libc_symlinkat_trampoline},
+ {"sync", libc_sync_trampoline},
+ {"truncate", libc_truncate_trampoline},
+ {"umask", libc_umask_trampoline},
+ {"undelete", libc_undelete_trampoline},
+ {"unlink", libc_unlink_trampoline},
+ {"unlinkat", libc_unlinkat_trampoline},
+ {"unmount", libc_unmount_trampoline},
+ {"write", libc_write_trampoline},
+ {"mmap", libc_mmap_trampoline},
+ {"munmap", libc_munmap_trampoline},
+ {"gettimeofday", libc_gettimeofday_trampoline},
+ {"getfsstat64", libc_getfsstat64_trampoline},
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent_test.go b/vendor/golang.org/x/sys/unix/dirent_test.go
new file mode 100644
index 0000000..48eb257
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dirent_test.go
@@ -0,0 +1,150 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDirent(t *testing.T) {
+ const (
+ direntBufSize = 2048
+ filenameMinSize = 11
+ )
+
+ d, err := ioutil.TempDir("", "dirent-test")
+ if err != nil {
+ t.Fatalf("tempdir: %v", err)
+ }
+ defer os.RemoveAll(d)
+ t.Logf("tmpdir: %s", d)
+
+ for i, c := range []byte("0123456789") {
+ name := string(bytes.Repeat([]byte{c}, filenameMinSize+i))
+ err = ioutil.WriteFile(filepath.Join(d, name), nil, 0644)
+ if err != nil {
+ t.Fatalf("writefile: %v", err)
+ }
+ }
+
+ buf := bytes.Repeat([]byte("DEADBEAF"), direntBufSize/8)
+ fd, err := unix.Open(d, unix.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+ defer unix.Close(fd)
+ n, err := unix.ReadDirent(fd, buf)
+ if err != nil {
+ t.Fatalf("ReadDirent: %v", err)
+ }
+ buf = buf[:n]
+
+ names := make([]string, 0, 10)
+ for len(buf) > 0 {
+ var bc int
+ bc, _, names = unix.ParseDirent(buf, -1, names)
+ if bc == 0 && len(buf) > 0 {
+ t.Fatal("no progress")
+ }
+ buf = buf[bc:]
+ }
+
+ sort.Strings(names)
+ t.Logf("names: %q", names)
+
+ if len(names) != 10 {
+ t.Errorf("got %d names; expected 10", len(names))
+ }
+ for i, name := range names {
+ ord, err := strconv.Atoi(name[:1])
+ if err != nil {
+ t.Fatalf("names[%d] is non-integer %q: %v", i, names[i], err)
+ }
+ if expected := string(strings.Repeat(name[:1], filenameMinSize+ord)); name != expected {
+ t.Errorf("names[%d] is %q (len %d); expected %q (len %d)", i, name, len(name), expected, len(expected))
+ }
+ }
+}
+
+func TestDirentRepeat(t *testing.T) {
+ const N = 100
+ // Note: the size of the buffer is small enough that the loop
+ // below will need to execute multiple times. See issue #31368.
+ size := N * unsafe.Offsetof(unix.Dirent{}.Name) / 4
+ if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+ if size < 1024 {
+ size = 1024 // DIRBLKSIZ, see issue 31403.
+ }
+ if runtime.GOOS == "freebsd" {
+ t.Skip("need to fix issue 31416 first")
+ }
+ }
+
+ // Make a directory containing N files
+ d, err := ioutil.TempDir("", "direntRepeat-test")
+ if err != nil {
+ t.Fatalf("tempdir: %v", err)
+ }
+ defer os.RemoveAll(d)
+
+ var files []string
+ for i := 0; i < N; i++ {
+ files = append(files, fmt.Sprintf("file%d", i))
+ }
+ for _, file := range files {
+ err = ioutil.WriteFile(filepath.Join(d, file), []byte("contents"), 0644)
+ if err != nil {
+ t.Fatalf("writefile: %v", err)
+ }
+ }
+
+ // Read the directory entries using ReadDirent.
+ fd, err := unix.Open(d, unix.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+ defer unix.Close(fd)
+ var files2 []string
+ for {
+ buf := make([]byte, size)
+ n, err := unix.ReadDirent(fd, buf)
+ if err != nil {
+ t.Fatalf("ReadDirent: %v", err)
+ }
+ if n == 0 {
+ break
+ }
+ buf = buf[:n]
+ for len(buf) > 0 {
+ var consumed int
+ consumed, _, files2 = unix.ParseDirent(buf, -1, files2)
+ if consumed == 0 && len(buf) > 0 {
+ t.Fatal("no progress")
+ }
+ buf = buf[consumed:]
+ }
+ }
+
+ // Check results
+ sort.Strings(files)
+ sort.Strings(files2)
+ if strings.Join(files, "|") != strings.Join(files2, "|") {
+ t.Errorf("bad file list: want\n%q\ngot\n%q", files, files2)
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/example_test.go b/vendor/golang.org/x/sys/unix/example_exec_test.go
similarity index 83%
rename from vendor/golang.org/x/sys/unix/example_test.go
rename to vendor/golang.org/x/sys/unix/example_exec_test.go
index 10619af..bb4d3bf 100644
--- a/vendor/golang.org/x/sys/unix/example_test.go
+++ b/vendor/golang.org/x/sys/unix/example_exec_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix_test
diff --git a/vendor/golang.org/x/sys/unix/example_flock_test.go b/vendor/golang.org/x/sys/unix/example_flock_test.go
new file mode 100644
index 0000000..6c91748
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/example_flock_test.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix_test
+
+import (
+ "log"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func ExampleFlock() {
+ f, _ := os.Create("example.lock")
+ if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
+ log.Fatal(err)
+ }
+ // Do work here that requires the lock. When finished, release the lock:
+ if err := unix.Flock(int(f.Fd()), unix.LOCK_UN); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/export_test.go b/vendor/golang.org/x/sys/unix/export_test.go
index e802469..f8ae0e0 100644
--- a/vendor/golang.org/x/sys/unix/export_test.go
+++ b/vendor/golang.org/x/sys/unix/export_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix
diff --git a/vendor/golang.org/x/sys/unix/getdirentries_test.go b/vendor/golang.org/x/sys/unix/getdirentries_test.go
new file mode 100644
index 0000000..7e9600b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/getdirentries_test.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd openbsd netbsd
+
+package unix_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestGetdirentries(t *testing.T) {
+ for _, count := range []int{10, 1000} {
+ t.Run(fmt.Sprintf("n=%d", count), func(t *testing.T) {
+ testGetdirentries(t, count)
+ })
+ }
+}
+func testGetdirentries(t *testing.T, count int) {
+ if count > 100 && testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
+ t.Skip("skipping in -short mode")
+ }
+ d, err := ioutil.TempDir("", "getdirentries-test")
+ if err != nil {
+ t.Fatalf("Tempdir: %v", err)
+ }
+ defer os.RemoveAll(d)
+ var names []string
+ for i := 0; i < count; i++ {
+ names = append(names, fmt.Sprintf("file%03d", i))
+ }
+
+ // Make files in the temp directory
+ for _, name := range names {
+ err := ioutil.WriteFile(filepath.Join(d, name), []byte("data"), 0)
+ if err != nil {
+ t.Fatalf("WriteFile: %v", err)
+ }
+ }
+
+ // Read files using Getdirentries
+ fd, err := unix.Open(d, unix.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("Open: %v", err)
+ }
+ defer unix.Close(fd)
+ var base uintptr
+ var buf [2048]byte
+ names2 := make([]string, 0, count)
+ for {
+ n, err := unix.Getdirentries(fd, buf[:], &base)
+ if err != nil {
+ t.Fatalf("Getdirentries: %v", err)
+ }
+ if n == 0 {
+ break
+ }
+ data := buf[:n]
+ for len(data) > 0 {
+ var bc int
+ bc, _, names2 = unix.ParseDirent(data, -1, names2)
+ if bc == 0 && len(data) > 0 {
+ t.Fatal("no progress")
+ }
+ data = data[bc:]
+ }
+ }
+
+ sort.Strings(names)
+ sort.Strings(names2)
+ if strings.Join(names, ":") != strings.Join(names2, ":") {
+ t.Errorf("names don't match\n names: %q\nnames2: %q", names, names2)
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/mmap_unix_test.go b/vendor/golang.org/x/sys/unix/mmap_unix_test.go
index 3258ca3..d4c4ef9 100644
--- a/vendor/golang.org/x/sys/unix/mmap_unix_test.go
+++ b/vendor/golang.org/x/sys/unix/mmap_unix_test.go
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix_test
import (
+ "runtime"
"testing"
"golang.org/x/sys/unix"
@@ -23,9 +24,14 @@ func TestMmap(t *testing.T) {
b[0] = 42
- if err := unix.Msync(b, unix.MS_SYNC); err != nil {
- t.Fatalf("Msync: %v", err)
+ if runtime.GOOS == "aix" {
+ t.Skip("msync returns invalid argument for AIX, skipping msync test")
+ } else {
+ if err := unix.Msync(b, unix.MS_SYNC); err != nil {
+ t.Fatalf("Msync: %v", err)
+ }
}
+
if err := unix.Madvise(b, unix.MADV_DONTNEED); err != nil {
t.Fatalf("Madvise: %v", err)
}
diff --git a/vendor/golang.org/x/sys/unix/openbsd_test.go b/vendor/golang.org/x/sys/unix/openbsd_test.go
index 734d765..3ded960 100644
--- a/vendor/golang.org/x/sys/unix/openbsd_test.go
+++ b/vendor/golang.org/x/sys/unix/openbsd_test.go
@@ -87,7 +87,7 @@ func TestMain(m *testing.M) {
func init() {
testProcs["pledge"] = testProc{
func() {
- fmt.Println(unix.Pledge("", nil))
+ fmt.Println(unix.Pledge("", ""))
os.Exit(0)
},
func() error {
diff --git a/vendor/golang.org/x/sys/unix/sendfile_test.go b/vendor/golang.org/x/sys/unix/sendfile_test.go
new file mode 100644
index 0000000..d41fb93
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/sendfile_test.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin,amd64 darwin,386 dragonfly freebsd linux solaris
+
+package unix_test
+
+import (
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestSendfile(t *testing.T) {
+ // Set up source data file.
+ tempDir, err := ioutil.TempDir("", "TestSendfile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+ name := filepath.Join(tempDir, "source")
+ const contents = "contents"
+ err = ioutil.WriteFile(name, []byte(contents), 0666)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ done := make(chan bool)
+
+ // Start server listening on a socket.
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Skipf("listen failed: %s\n", err)
+ }
+ defer ln.Close()
+ go func() {
+ conn, err := ln.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+ b, err := ioutil.ReadAll(conn)
+ if string(b) != contents {
+ t.Errorf("contents not transmitted: got %s (len=%d), want %s", string(b), len(b), contents)
+ }
+ done <- true
+ }()
+
+ // Open source file.
+ src, err := os.Open(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Send source file to server.
+ conn, err := net.Dial("tcp", ln.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ file, err := conn.(*net.TCPConn).File()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var off int64
+ n, err := unix.Sendfile(int(file.Fd()), int(src.Fd()), &off, len(contents))
+ if err != nil {
+ t.Errorf("Sendfile failed %s\n", err)
+ }
+ if n != len(contents) {
+ t.Errorf("written count wrong: want %d, got %d", len(contents), n)
+ }
+ // Note: off is updated on some systems and not others. Oh well.
+ // Linux: increments off by the amount sent.
+ // Darwin: leaves off unchanged.
+ // It would be nice to fix Darwin if we can.
+ if off != 0 && off != int64(len(contents)) {
+ t.Errorf("offset wrong: god %d, want %d or %d", off, 0, len(contents))
+ }
+ // The cursor position should be unchanged.
+ pos, err := src.Seek(0, 1)
+ if err != nil {
+ t.Errorf("can't get cursor position %s\n", err)
+ }
+ if pos != 0 {
+ t.Errorf("cursor position wrong: got %d, want 0", pos)
+ }
+
+ file.Close() // Note: required to have the close below really send EOF to the server.
+ conn.Close()
+
+ // Wait for server to close.
+ <-done
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_test.go b/vendor/golang.org/x/sys/unix/syscall_aix_test.go
new file mode 100644
index 0000000..6787643
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_aix_test.go
@@ -0,0 +1,168 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package unix_test
+
+import (
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestIoctlGetInt(t *testing.T) {
+ f, err := os.Open("/dev/random")
+ if err != nil {
+ t.Fatalf("failed to open device: %v", err)
+ }
+ defer f.Close()
+
+ v, err := unix.IoctlGetInt(int(f.Fd()), unix.RNDGETENTCNT)
+ if err != nil {
+ t.Fatalf("failed to perform ioctl: %v", err)
+ }
+
+ t.Logf("%d bits of entropy available", v)
+}
+
+func TestTime(t *testing.T) {
+ var ut unix.Time_t
+ ut2, err := unix.Time(&ut)
+ if err != nil {
+ t.Fatalf("Time: %v", err)
+ }
+ if ut != ut2 {
+ t.Errorf("Time: return value %v should be equal to argument %v", ut2, ut)
+ }
+
+ var now time.Time
+
+ for i := 0; i < 10; i++ {
+ ut, err = unix.Time(nil)
+ if err != nil {
+ t.Fatalf("Time: %v", err)
+ }
+
+ now = time.Now()
+
+ if int64(ut) == now.Unix() {
+ return
+ }
+ }
+
+ t.Errorf("Time: return value %v should be nearly equal to time.Now().Unix() %v", ut, now.Unix())
+}
+
+func TestUtime(t *testing.T) {
+ defer chtmpdir(t)()
+
+ touch(t, "file1")
+
+ buf := &unix.Utimbuf{
+ Modtime: 12345,
+ }
+
+ err := unix.Utime("file1", buf)
+ if err != nil {
+ t.Fatalf("Utime: %v", err)
+ }
+
+ fi, err := os.Stat("file1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if fi.ModTime().Unix() != 12345 {
+ t.Errorf("Utime: failed to change modtime: expected %v, got %v", 12345, fi.ModTime().Unix())
+ }
+}
+
+func TestUtimesNanoAt(t *testing.T) {
+ defer chtmpdir(t)()
+
+ symlink := "symlink1"
+ defer os.Remove(symlink)
+ err := os.Symlink("nonexisting", symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []unix.Timespec{
+ {Sec: 1111, Nsec: 2222},
+ {Sec: 3333, Nsec: 4444},
+ }
+ err = unix.UtimesNanoAt(unix.AT_FDCWD, symlink, ts, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ t.Fatalf("UtimesNanoAt: %v", err)
+ }
+
+ var st unix.Stat_t
+ err = unix.Lstat(symlink, &st)
+ if err != nil {
+ t.Fatalf("Lstat: %v", err)
+ }
+ if runtime.GOARCH == "ppc64" {
+ if int64(st.Atim.Sec) != int64(ts[0].Sec) || st.Atim.Nsec != ts[0].Nsec {
+ t.Errorf("UtimesNanoAt: wrong atime: %v", st.Atim)
+ }
+ if int64(st.Mtim.Sec) != int64(ts[1].Sec) || st.Mtim.Nsec != ts[1].Nsec {
+ t.Errorf("UtimesNanoAt: wrong mtime: %v", st.Mtim)
+ }
+ } else {
+ if int32(st.Atim.Sec) != int32(ts[0].Sec) || int32(st.Atim.Nsec) != int32(ts[0].Nsec) {
+ t.Errorf("UtimesNanoAt: wrong atime: %v", st.Atim)
+ }
+ if int32(st.Mtim.Sec) != int32(ts[1].Sec) || int32(st.Mtim.Nsec) != int32(ts[1].Nsec) {
+ t.Errorf("UtimesNanoAt: wrong mtime: %v", st.Mtim)
+ }
+ }
+}
+
+func TestSelect(t *testing.T) {
+ _, err := unix.Select(0, nil, nil, nil, &unix.Timeval{Sec: 0, Usec: 0})
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ dur := 150 * time.Millisecond
+ tv := unix.NsecToTimeval(int64(dur))
+ start := time.Now()
+ _, err = unix.Select(0, nil, nil, nil, &tv)
+ took := time.Since(start)
+ if err != nil {
+ t.Fatalf("Select: %v", err)
+ }
+
+ if took < dur {
+ t.Errorf("Select: timeout should have been at least %v, got %v", dur, took)
+ }
+}
+
+func TestPselect(t *testing.T) {
+ if runtime.GOARCH == "ppc64" {
+ t.Skip("pselect issue with structure timespec on AIX 7.2 tl0, skipping test")
+ }
+
+ _, err := unix.Pselect(0, nil, nil, nil, &unix.Timespec{Sec: 0, Nsec: 0}, nil)
+ if err != nil {
+ t.Fatalf("Pselect: %v", err)
+ }
+
+ dur := 2500 * time.Microsecond
+ ts := unix.NsecToTimespec(int64(dur))
+ start := time.Now()
+ _, err = unix.Pselect(0, nil, nil, nil, &ts, nil)
+ took := time.Since(start)
+ if err != nil {
+ t.Fatalf("Pselect: %v", err)
+ }
+
+ if took < dur {
+ t.Errorf("Pselect: timeout should have been at least %v, got %v", dur, took)
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go
index 6c4e2ac..12924cb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd_test.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd_test.go
@@ -15,18 +15,14 @@ import (
"golang.org/x/sys/unix"
)
-const MNT_WAIT = 1
-const MNT_NOWAIT = 2
-
func TestGetfsstat(t *testing.T) {
- const flags = MNT_NOWAIT // see golang.org/issue/16937
- n, err := unix.Getfsstat(nil, flags)
+ n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
if err != nil {
t.Fatal(err)
}
data := make([]unix.Statfs_t, n)
- n2, err := unix.Getfsstat(data, flags)
+ n2, err := unix.Getfsstat(data, unix.MNT_NOWAIT)
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_test.go b/vendor/golang.org/x/sys/unix/syscall_darwin_test.go
index 65691d5..f24273d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_test.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_test.go
@@ -4,6 +4,13 @@
package unix_test
+import (
+ "os"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
// stringsFromByteSlice converts a sequence of attributes to a []string.
// On Darwin, each entry is a NULL-terminated string.
func stringsFromByteSlice(buf []byte) []string {
@@ -17,3 +24,49 @@ func stringsFromByteSlice(buf []byte) []string {
}
return result
}
+
+func TestUtimesNanoAt(t *testing.T) {
+ defer chtmpdir(t)()
+
+ symlink := "symlink1"
+ os.Remove(symlink)
+ err := os.Symlink("nonexisting", symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []unix.Timespec{
+ {Sec: 1111, Nsec: 2222},
+ {Sec: 3333, Nsec: 4444},
+ }
+ err = unix.UtimesNanoAt(unix.AT_FDCWD, symlink, ts, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ t.Fatalf("UtimesNanoAt: %v", err)
+ }
+
+ var st unix.Stat_t
+ err = unix.Lstat(symlink, &st)
+ if err != nil {
+ t.Fatalf("Lstat: %v", err)
+ }
+
+ // Only check Mtim, Atim might not be supported by the underlying filesystem
+ expected := ts[1]
+ if st.Mtim.Nsec == 0 {
+ // Some filesystems only support 1-second time stamp resolution
+ // and will always set Nsec to 0.
+ expected.Nsec = 0
+ }
+ if st.Mtim != expected {
+ t.Errorf("UtimesNanoAt: wrong mtime: got %v, expected %v", st.Mtim, expected)
+ }
+}
+
+func TestSysctlClockinfo(t *testing.T) {
+ ci, err := unix.SysctlClockinfo("kern.clockrate")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("tick = %v, tickadj = %v, hz = %v, profhz = %v, stathz = %v",
+ ci.Tick, ci.Tickadj, ci.Hz, ci.Profhz, ci.Stathz)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go
index 7fd5e2a..00aa655 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go
@@ -7,9 +7,16 @@
package unix_test
import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
"os"
"runtime"
"runtime/debug"
+ "strconv"
+ "strings"
"testing"
"time"
@@ -31,11 +38,27 @@ func TestIoctlGetInt(t *testing.T) {
t.Logf("%d bits of entropy available", v)
}
+func TestIoctlGetRTCTime(t *testing.T) {
+ f, err := os.Open("/dev/rtc0")
+ if err != nil {
+ t.Skipf("skipping test, %v", err)
+ }
+ defer f.Close()
+
+ v, err := unix.IoctlGetRTCTime(int(f.Fd()))
+ if err != nil {
+ t.Fatalf("failed to perform ioctl: %v", err)
+ }
+
+ t.Logf("RTC time: %04d-%02d-%02d %02d:%02d:%02d", v.Year+1900, v.Mon+1, v.Mday, v.Hour, v.Min, v.Sec)
+}
+
func TestPpoll(t *testing.T) {
if runtime.GOOS == "android" {
t.Skip("mkfifo syscall is not available on android, skipping test")
}
+ defer chtmpdir(t)()
f, cleanup := mktmpfifo(t)
defer cleanup()
@@ -140,11 +163,16 @@ func TestUtimesNanoAt(t *testing.T) {
if err != nil {
t.Fatalf("Lstat: %v", err)
}
- if st.Atim != ts[0] {
- t.Errorf("UtimesNanoAt: wrong atime: %v", st.Atim)
+
+ // Only check Mtim, Atim might not be supported by the underlying filesystem
+ expected := ts[1]
+ if st.Mtim.Nsec == 0 {
+ // Some filesystems only support 1-second time stamp resolution
+ // and will always set Nsec to 0.
+ expected.Nsec = 0
}
- if st.Mtim != ts[1] {
- t.Errorf("UtimesNanoAt: wrong mtime: %v", st.Mtim)
+ if st.Mtim != expected {
+ t.Errorf("UtimesNanoAt: wrong mtime: expected %v, got %v", expected, st.Mtim)
}
}
@@ -172,7 +200,7 @@ func TestRlimitAs(t *testing.T) {
// should fail. See 'man 2 getrlimit'.
_, err = unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
if err == nil {
- t.Fatal("Mmap: unexpectedly suceeded after setting RLIMIT_AS")
+ t.Fatal("Mmap: unexpectedly succeeded after setting RLIMIT_AS")
}
err = unix.Setrlimit(unix.RLIMIT_AS, &rlim)
@@ -267,6 +295,23 @@ func TestSchedSetaffinity(t *testing.T) {
t.Skip("skipping setaffinity tests on android")
}
+ // On a system like ppc64x where some cores can be disabled using ppc64_cpu,
+ // setaffinity should only be called with enabled cores. The valid cores
+ // are found from the oldMask, but if none are found then the setaffinity
+ // tests are skipped. Issue #27875.
+ if !oldMask.IsSet(cpu) {
+ newMask.Zero()
+ for i := 0; i < len(oldMask); i++ {
+ if oldMask.IsSet(i) {
+ newMask.Set(i)
+ break
+ }
+ }
+ if newMask.Count() == 0 {
+ t.Skip("skipping setaffinity tests if CPU not available")
+ }
+ }
+
err = unix.SchedSetaffinity(0, &newMask)
if err != nil {
t.Fatalf("SchedSetaffinity: %v", err)
@@ -384,3 +429,185 @@ func stringsFromByteSlice(buf []byte) []string {
}
return result
}
+
+func TestFaccessat(t *testing.T) {
+ defer chtmpdir(t)()
+ touch(t, "file1")
+
+ err := unix.Faccessat(unix.AT_FDCWD, "file1", unix.R_OK, 0)
+ if err != nil {
+ t.Errorf("Faccessat: unexpected error: %v", err)
+ }
+
+ err = unix.Faccessat(unix.AT_FDCWD, "file1", unix.R_OK, 2)
+ if err != unix.EINVAL {
+ t.Errorf("Faccessat: unexpected error: %v, want EINVAL", err)
+ }
+
+ err = unix.Faccessat(unix.AT_FDCWD, "file1", unix.R_OK, unix.AT_EACCESS)
+ if err != nil {
+ t.Errorf("Faccessat: unexpected error: %v", err)
+ }
+
+ err = os.Symlink("file1", "symlink1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = unix.Faccessat(unix.AT_FDCWD, "symlink1", unix.R_OK, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ t.Errorf("Faccessat SYMLINK_NOFOLLOW: unexpected error %v", err)
+ }
+
+ // We can't really test AT_SYMLINK_NOFOLLOW, because there
+ // doesn't seem to be any way to change the mode of a symlink.
+ // We don't test AT_EACCESS because such tests are only
+ // meaningful if run as root.
+
+ err = unix.Fchmodat(unix.AT_FDCWD, "file1", 0, 0)
+ if err != nil {
+ t.Errorf("Fchmodat: unexpected error %v", err)
+ }
+
+ err = unix.Faccessat(unix.AT_FDCWD, "file1", unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ t.Errorf("Faccessat: unexpected error: %v", err)
+ }
+
+ err = unix.Faccessat(unix.AT_FDCWD, "file1", unix.R_OK, unix.AT_SYMLINK_NOFOLLOW)
+ if err != unix.EACCES {
+ if unix.Getuid() != 0 {
+ t.Errorf("Faccessat: unexpected error: %v, want EACCES", err)
+ }
+ }
+}
+
+func TestSyncFileRange(t *testing.T) {
+ file, err := ioutil.TempFile("", "TestSyncFileRange")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(file.Name())
+ defer file.Close()
+
+ err = unix.SyncFileRange(int(file.Fd()), 0, 0, 0)
+ if err == unix.ENOSYS || err == unix.EPERM {
+ t.Skip("sync_file_range syscall is not available, skipping test")
+ } else if err != nil {
+ t.Fatalf("SyncFileRange: %v", err)
+ }
+
+ // invalid flags
+ flags := 0xf00
+ err = unix.SyncFileRange(int(file.Fd()), 0, 0, flags)
+ if err != unix.EINVAL {
+ t.Fatalf("SyncFileRange: unexpected error: %v, want EINVAL", err)
+ }
+}
+
+func TestClockNanosleep(t *testing.T) {
+ delay := 100 * time.Millisecond
+
+ // Relative timespec.
+ start := time.Now()
+ rel := unix.NsecToTimespec(delay.Nanoseconds())
+ err := unix.ClockNanosleep(unix.CLOCK_MONOTONIC, 0, &rel, nil)
+ if err == unix.ENOSYS || err == unix.EPERM {
+ t.Skip("clock_nanosleep syscall is not available, skipping test")
+ } else if err != nil {
+ t.Errorf("ClockNanosleep(CLOCK_MONOTONIC, 0, %#v, nil) = %v", &rel, err)
+ } else if slept := time.Now().Sub(start); slept < delay {
+ t.Errorf("ClockNanosleep(CLOCK_MONOTONIC, 0, %#v, nil) slept only %v", &rel, slept)
+ }
+
+ // Absolute timespec.
+ start = time.Now()
+ until := start.Add(delay)
+ abs := unix.NsecToTimespec(until.UnixNano())
+ err = unix.ClockNanosleep(unix.CLOCK_REALTIME, unix.TIMER_ABSTIME, &abs, nil)
+ if err != nil {
+ t.Errorf("ClockNanosleep(CLOCK_REALTIME, TIMER_ABSTIME, %#v (=%v), nil) = %v", &abs, until, err)
+ } else if slept := time.Now().Sub(start); slept < delay {
+ t.Errorf("ClockNanosleep(CLOCK_REALTIME, TIMER_ABSTIME, %#v (=%v), nil) slept only %v", &abs, until, slept)
+ }
+
+ // Invalid clock. clock_nanosleep(2) says EINVAL, but it’s actually EOPNOTSUPP.
+ err = unix.ClockNanosleep(unix.CLOCK_THREAD_CPUTIME_ID, 0, &rel, nil)
+ if err != unix.EINVAL && err != unix.EOPNOTSUPP {
+ t.Errorf("ClockNanosleep(CLOCK_THREAD_CPUTIME_ID, 0, %#v, nil) = %v, want EINVAL or EOPNOTSUPP", &rel, err)
+ }
+}
+
+func TestOpenByHandleAt(t *testing.T) {
+ skipIfNotSupported := func(t *testing.T, name string, err error) {
+ if err == unix.EPERM {
+ t.Skipf("skipping %s test without CAP_DAC_READ_SEARCH", name)
+ }
+ if err == unix.ENOSYS {
+ t.Skipf("%s system call not available", name)
+ }
+ if err == unix.EOPNOTSUPP {
+ t.Skipf("%s not supported on this filesystem", name)
+ }
+ }
+
+ h, mountID, err := unix.NameToHandleAt(unix.AT_FDCWD, "syscall_linux_test.go", 0)
+ if err != nil {
+ skipIfNotSupported(t, "name_to_handle_at", err)
+ t.Fatalf("NameToHandleAt: %v", err)
+ }
+ t.Logf("mountID: %v, handle: size=%d, type=%d, bytes=%q", mountID,
+ h.Size(), h.Type(), h.Bytes())
+ mount, err := openMountByID(mountID)
+ if err != nil {
+ t.Fatalf("openMountByID: %v", err)
+ }
+ defer mount.Close()
+
+ for _, clone := range []bool{false, true} {
+ t.Run("clone="+strconv.FormatBool(clone), func(t *testing.T) {
+ if clone {
+ h = unix.NewFileHandle(h.Type(), h.Bytes())
+ }
+ fd, err := unix.OpenByHandleAt(int(mount.Fd()), h, unix.O_RDONLY)
+ skipIfNotSupported(t, "open_by_handle_at", err)
+ if err != nil {
+ t.Fatalf("OpenByHandleAt: %v", err)
+ }
+ defer unix.Close(fd)
+
+ t.Logf("opened fd %v", fd)
+ f := os.NewFile(uintptr(fd), "")
+ slurp, err := ioutil.ReadAll(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ const substr = "Some substring for a test."
+ if !strings.Contains(string(slurp), substr) {
+ t.Errorf("didn't find substring %q in opened file; read %d bytes", substr, len(slurp))
+ }
+ })
+ }
+}
+
+func openMountByID(mountID int) (f *os.File, err error) {
+ mi, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer mi.Close()
+ bs := bufio.NewScanner(mi)
+ wantPrefix := []byte(fmt.Sprintf("%v ", mountID))
+ for bs.Scan() {
+ if !bytes.HasPrefix(bs.Bytes(), wantPrefix) {
+ continue
+ }
+ fields := strings.Fields(bs.Text())
+ dev := fields[4]
+ return os.Open(dev)
+ }
+ if err := bs.Err(); err != nil {
+ return nil, err
+ }
+ return nil, errors.New("mountID not found")
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_test.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_test.go
new file mode 100644
index 0000000..41141f9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_test.go
@@ -0,0 +1,51 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix_test
+
+import (
+ "bytes"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On NetBSD, each entry consists of a single byte containing the length
+// of the attribute name, followed by the attribute name.
+// The name is _not_ NULL-terminated.
+func stringsFromByteSlice(buf []byte) []string {
+ var result []string
+ i := 0
+ for i < len(buf) {
+ next := i + 1 + int(buf[i])
+ result = append(result, string(buf[i+1:next]))
+ i = next
+ }
+ return result
+}
+
+func TestSysctlClockinfo(t *testing.T) {
+ ci, err := unix.SysctlClockinfo("kern.clockrate")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("tick = %v, tickadj = %v, hz = %v, profhz = %v, stathz = %v",
+ ci.Tick, ci.Tickadj, ci.Hz, ci.Profhz, ci.Stathz)
+}
+
+func TestIoctlPtmget(t *testing.T) {
+ fd, err := unix.Open("/dev/ptmx", unix.O_NOCTTY|unix.O_RDWR, 0666)
+ if err != nil {
+ t.Skip("failed to open /dev/ptmx, skipping test")
+ }
+ defer unix.Close(fd)
+
+ ptm, err := unix.IoctlGetPtmget(fd, unix.TIOCPTSNAME)
+ if err != nil {
+ t.Fatalf("IoctlGetPtmget: %v\n", err)
+ }
+
+ t.Logf("sfd = %v, ptsname = %v", ptm.Sfd, string(ptm.Sn[:bytes.IndexByte(ptm.Sn[:], 0)]))
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_test.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_test.go
new file mode 100644
index 0000000..7bf75ee
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_test.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix_test
+
+import (
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestPpoll(t *testing.T) {
+ f, cleanup := mktmpfifo(t)
+ defer cleanup()
+
+ const timeout = 100 * time.Millisecond
+
+ ok := make(chan bool, 1)
+ go func() {
+ select {
+ case <-time.After(10 * timeout):
+ t.Errorf("Ppoll: failed to timeout after %d", 10*timeout)
+ case <-ok:
+ }
+ }()
+
+ fds := []unix.PollFd{{Fd: int32(f.Fd()), Events: unix.POLLIN}}
+ timeoutTs := unix.NsecToTimespec(int64(timeout))
+ n, err := unix.Ppoll(fds, &timeoutTs, nil)
+ ok <- true
+ if err != nil {
+ t.Errorf("Ppoll: unexpected error: %v", err)
+ return
+ }
+ if n != 0 {
+ t.Errorf("Ppoll: wrong number of events: got %v, expected %v", n, 0)
+ return
+ }
+}
+
+func TestSysctlClockinfo(t *testing.T) {
+ ci, err := unix.SysctlClockinfo("kern.clockrate")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("tick = %v, tickadj = %v, hz = %v, profhz = %v, stathz = %v",
+ ci.Tick, ci.Tickadj, ci.Hz, ci.Profhz, ci.Stathz)
+}
+
+func TestSysctlUvmexp(t *testing.T) {
+ uvm, err := unix.SysctlUvmexp("vm.uvmexp")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("free = %v", uvm.Free)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_test.go b/vendor/golang.org/x/sys/unix/syscall_test.go
index a8eef7c..e0ecfa7 100644
--- a/vendor/golang.org/x/sys/unix/syscall_test.go
+++ b/vendor/golang.org/x/sys/unix/syscall_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix_test
@@ -58,3 +58,17 @@ func TestUname(t *testing.T) {
t.Logf("OS: %s/%s %s", utsname.Sysname[:], utsname.Machine[:], utsname.Release[:])
}
+
+// Test that this compiles. (Issue #31735)
+func TestStatFieldNames(t *testing.T) {
+ var st unix.Stat_t
+ var ts *unix.Timespec
+ ts = &st.Atim
+ ts = &st.Mtim
+ ts = &st.Ctim
+ _ = ts
+ secs := int64(st.Mtim.Sec)
+ nsecs := int64(st.Mtim.Nsec)
+ _ = secs
+ _ = nsecs
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go
index ad09716..d01964b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix_test.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix_test
@@ -15,6 +15,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"syscall"
"testing"
"time"
@@ -98,6 +99,45 @@ func TestErrnoSignalName(t *testing.T) {
}
}
+func TestSignalNum(t *testing.T) {
+ testSignals := []struct {
+ name string
+ want syscall.Signal
+ }{
+ {"SIGHUP", syscall.SIGHUP},
+ {"SIGPIPE", syscall.SIGPIPE},
+ {"SIGSEGV", syscall.SIGSEGV},
+ {"NONEXISTS", 0},
+ }
+ for _, ts := range testSignals {
+ t.Run(fmt.Sprintf("%s/%d", ts.name, ts.want), func(t *testing.T) {
+ got := unix.SignalNum(ts.name)
+ if got != ts.want {
+ t.Errorf("SignalNum(%s) returned %d, want %d", ts.name, got, ts.want)
+ }
+ })
+
+ }
+}
+
+func TestFcntlInt(t *testing.T) {
+ t.Parallel()
+ file, err := ioutil.TempFile("", "TestFnctlInt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(file.Name())
+ defer file.Close()
+ f := file.Fd()
+ flags, err := unix.FcntlInt(f, unix.F_GETFD, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if flags&unix.FD_CLOEXEC == 0 {
+ t.Errorf("flags %#x do not include FD_CLOEXEC", flags)
+ }
+}
+
// TestFcntlFlock tests whether the file locking structure matches
// the calling convention of each kernel.
func TestFcntlFlock(t *testing.T) {
@@ -134,6 +174,27 @@ func TestPassFD(t *testing.T) {
return
}
+ if runtime.GOOS == "aix" {
+ // Unix network isn't properly working on AIX
+ // 7.2 with Technical Level < 2
+ out, err := exec.Command("oslevel", "-s").Output()
+ if err != nil {
+ t.Skipf("skipping on AIX because oslevel -s failed: %v", err)
+ }
+
+ if len(out) < len("7200-XX-ZZ-YYMM") { // AIX 7.2, Tech Level XX, Service Pack ZZ, date YYMM
+ t.Skip("skipping on AIX because oslevel -s hasn't the right length")
+ }
+ aixVer := string(out[:4])
+ tl, err := strconv.Atoi(string(out[5:7]))
+ if err != nil {
+ t.Skipf("skipping on AIX because oslevel -s output cannot be parsed: %v", err)
+ }
+ if aixVer < "7200" || (aixVer == "7200" && tl < 2) {
+ t.Skip("skipped on AIX versions previous to 7.2 TL 2")
+ }
+ }
+
tempDir, err := ioutil.TempDir("", "TestPassFD")
if err != nil {
t.Fatal(err)
@@ -316,6 +377,12 @@ func TestRlimit(t *testing.T) {
}
set := rlimit
set.Cur = set.Max - 1
+ if runtime.GOOS == "darwin" && set.Cur > 10240 {
+ // The max file limit is 10240, even though
+ // the max returned by Getrlimit is 1<<63-1.
+ // This is OPEN_MAX in sys/syslimits.h.
+ set.Cur = 10240
+ }
err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set)
if err != nil {
t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
@@ -355,6 +422,14 @@ func TestSeekFailure(t *testing.T) {
}
}
+func TestSetsockoptString(t *testing.T) {
+ // should not panic on empty string, see issue #31277
+ err := unix.SetsockoptString(-1, 0, 0, "")
+ if err == nil {
+ t.Fatalf("SetsockoptString: did not fail")
+ }
+}
+
func TestDup(t *testing.T) {
file, err := ioutil.TempFile("", "TestDup")
if err != nil {
@@ -369,14 +444,24 @@ func TestDup(t *testing.T) {
t.Fatalf("Dup: %v", err)
}
- err = unix.Dup2(newFd, newFd+1)
+ // Create and reserve a file descriptor.
+ // Dup2 automatically closes it before reusing it.
+ nullFile, err := os.Open("/dev/null")
+ if err != nil {
+ t.Fatal(err)
+ }
+ dupFd := int(file.Fd())
+ err = unix.Dup2(newFd, dupFd)
if err != nil {
t.Fatalf("Dup2: %v", err)
}
+ // Keep the dummy file open long enough to not be closed in
+ // its finalizer.
+ runtime.KeepAlive(nullFile)
b1 := []byte("Test123")
b2 := make([]byte, 7)
- _, err = unix.Write(newFd+1, b1)
+ _, err = unix.Write(dupFd, b1)
if err != nil {
t.Fatalf("Write to dup2 fd failed: %v", err)
}
@@ -399,6 +484,7 @@ func TestPoll(t *testing.T) {
t.Skip("mkfifo syscall is not available on android and iOS, skipping test")
}
+ defer chtmpdir(t)()
f, cleanup := mktmpfifo(t)
defer cleanup()
@@ -432,9 +518,9 @@ func TestGetwd(t *testing.T) {
t.Fatalf("Open .: %s", err)
}
defer fd.Close()
- // These are chosen carefully not to be symlinks on a Mac
- // (unlike, say, /var, /etc)
- dirs := []string{"/", "/usr/bin"}
+ // Directory list for test. Do not worry if any are symlinks or do not
+ // exist on some common unix desktop environments. That will be checked.
+ dirs := []string{"/", "/usr/bin", "/etc", "/var", "/opt"}
switch runtime.GOOS {
case "android":
dirs = []string{"/", "/system/bin"}
@@ -454,6 +540,17 @@ func TestGetwd(t *testing.T) {
}
oldwd := os.Getenv("PWD")
for _, d := range dirs {
+ // Check whether d exists, is a dir and that d's path does not contain a symlink
+ fi, err := os.Stat(d)
+ if err != nil || !fi.IsDir() {
+ t.Logf("Test dir %s stat error (%v) or not a directory, skipping", d, err)
+ continue
+ }
+ check, err := filepath.EvalSymlinks(d)
+ if err != nil || check != d {
+ t.Logf("Test dir %s (%s) is symlink or other error (%v), skipping", d, check, err)
+ continue
+ }
err = os.Chdir(d)
if err != nil {
t.Fatalf("Chdir: %v", err)
@@ -546,7 +643,8 @@ func TestFchmodat(t *testing.T) {
didChmodSymlink := true
err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
- if (runtime.GOOS == "android" || runtime.GOOS == "linux" || runtime.GOOS == "solaris") && err == unix.EOPNOTSUPP {
+ if (runtime.GOOS == "android" || runtime.GOOS == "linux" ||
+ runtime.GOOS == "solaris" || runtime.GOOS == "illumos") && err == unix.EOPNOTSUPP {
// Linux and Illumos don't support flags != 0
didChmodSymlink = false
} else {
@@ -585,6 +683,29 @@ func TestMkdev(t *testing.T) {
}
}
+func TestRenameat(t *testing.T) {
+ defer chtmpdir(t)()
+
+ from, to := "renamefrom", "renameto"
+
+ touch(t, from)
+
+ err := unix.Renameat(unix.AT_FDCWD, from, unix.AT_FDCWD, to)
+ if err != nil {
+ t.Fatalf("Renameat: unexpected error: %v", err)
+ }
+
+ _, err = os.Stat(to)
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = os.Stat(from)
+ if err == nil {
+ t.Errorf("Renameat: stat of renamed file %q unexpectedly succeeded", from)
+ }
+}
+
// mktmpfifo creates a temporary FIFO and provides a cleanup function.
func mktmpfifo(t *testing.T) (*os.File, func()) {
err := unix.Mkfifo("fifo", 0666)
diff --git a/vendor/golang.org/x/sys/unix/timestruct_test.go b/vendor/golang.org/x/sys/unix/timestruct_test.go
index 4215f46..1a72fdb 100644
--- a/vendor/golang.org/x/sys/unix/timestruct_test.go
+++ b/vendor/golang.org/x/sys/unix/timestruct_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix_test
diff --git a/vendor/golang.org/x/sys/unix/xattr_test.go b/vendor/golang.org/x/sys/unix/xattr_test.go
index b8b28d0..57fc84f 100644
--- a/vendor/golang.org/x/sys/unix/xattr_test.go
+++ b/vendor/golang.org/x/sys/unix/xattr_test.go
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin freebsd linux
+// +build darwin freebsd linux netbsd
package unix_test
import (
+ "io/ioutil"
"os"
"runtime"
"strings"
@@ -23,13 +24,19 @@ func TestXattr(t *testing.T) {
xattrName := "user.test"
xattrDataSet := "gopher"
- err := unix.Setxattr(f, xattrName, []byte(xattrDataSet), 0)
+
+ err := unix.Setxattr(f, xattrName, []byte{}, 0)
if err == unix.ENOTSUP || err == unix.EOPNOTSUPP {
t.Skip("filesystem does not support extended attributes, skipping test")
} else if err != nil {
t.Fatalf("Setxattr: %v", err)
}
+ err = unix.Setxattr(f, xattrName, []byte(xattrDataSet), 0)
+ if err != nil {
+ t.Fatalf("Setxattr: %v", err)
+ }
+
// find size
size, err := unix.Listxattr(f, nil)
if err != nil {
@@ -117,3 +124,84 @@ func TestXattr(t *testing.T) {
}
}
}
+
+func TestFdXattr(t *testing.T) {
+ file, err := ioutil.TempFile("", "TestFdXattr")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(file.Name())
+ defer file.Close()
+
+ fd := int(file.Fd())
+ xattrName := "user.test"
+ xattrDataSet := "gopher"
+
+ err = unix.Fsetxattr(fd, xattrName, []byte(xattrDataSet), 0)
+ if err == unix.ENOTSUP || err == unix.EOPNOTSUPP {
+ t.Skip("filesystem does not support extended attributes, skipping test")
+ } else if err != nil {
+ t.Fatalf("Fsetxattr: %v", err)
+ }
+
+ // find size
+ size, err := unix.Flistxattr(fd, nil)
+ if err != nil {
+ t.Fatalf("Flistxattr: %v", err)
+ }
+
+ if size <= 0 {
+ t.Fatalf("Flistxattr returned an empty list of attributes")
+ }
+
+ buf := make([]byte, size)
+ read, err := unix.Flistxattr(fd, buf)
+ if err != nil {
+ t.Fatalf("Flistxattr: %v", err)
+ }
+
+ xattrs := stringsFromByteSlice(buf[:read])
+
+ xattrWant := xattrName
+ if runtime.GOOS == "freebsd" {
+ // On FreeBSD, the namespace is stored separately from the xattr
+ // name and Listxattr doesn't return the namespace prefix.
+ xattrWant = strings.TrimPrefix(xattrWant, "user.")
+ }
+ found := false
+ for _, name := range xattrs {
+ if name == xattrWant {
+ found = true
+ }
+ }
+
+ if !found {
+ t.Errorf("Flistxattr did not return previously set attribute '%s'", xattrName)
+ }
+
+ // find size
+ size, err = unix.Fgetxattr(fd, xattrName, nil)
+ if err != nil {
+ t.Fatalf("Fgetxattr: %v", err)
+ }
+
+ if size <= 0 {
+ t.Fatalf("Fgetxattr returned an empty attribute")
+ }
+
+ xattrDataGet := make([]byte, size)
+ _, err = unix.Fgetxattr(fd, xattrName, xattrDataGet)
+ if err != nil {
+ t.Fatalf("Fgetxattr: %v", err)
+ }
+
+ got := string(xattrDataGet)
+ if got != xattrDataSet {
+ t.Errorf("Fgetxattr: expected attribute value %s, got %s", xattrDataSet, got)
+ }
+
+ err = unix.Fremovexattr(fd, xattrName)
+ if err != nil {
+ t.Fatalf("Fremovexattr: %v", err)
+ }
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_test.go b/vendor/golang.org/x/sys/windows/syscall_test.go
index d7009e4..f09c6dd 100644
--- a/vendor/golang.org/x/sys/windows/syscall_test.go
+++ b/vendor/golang.org/x/sys/windows/syscall_test.go
@@ -7,6 +7,7 @@
package windows_test
import (
+ "strings"
"syscall"
"testing"
@@ -51,3 +52,13 @@ func TestGetProcAddressByOrdinal(t *testing.T) {
t.Error("shlwapi.dll:IsOS(OS_NT) returned 0, expected non-zero value")
}
}
+
+func TestGetSystemDirectory(t *testing.T) {
+ d, err := windows.GetSystemDirectory()
+ if err != nil {
+ t.Fatalf("Failed to get system directory: %s", err)
+ }
+ if !strings.HasSuffix(strings.ToLower(d), "\\system32") {
+ t.Fatalf("System directory does not end in system32: %s", d)
+ }
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows_test.go b/vendor/golang.org/x/sys/windows/syscall_windows_test.go
index 0e27464..ff9689a 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows_test.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows_test.go
@@ -5,12 +5,13 @@
package windows_test
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
+ "runtime"
"syscall"
"testing"
- "unsafe"
"golang.org/x/sys/windows"
)
@@ -54,34 +55,14 @@ func TestWin32finddata(t *testing.T) {
}
func TestFormatMessage(t *testing.T) {
- dll := windows.MustLoadDLL("pdh.dll")
-
- pdhOpenQuery := func(datasrc *uint16, userdata uint32, query *windows.Handle) (errno uintptr) {
- r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhOpenQueryW").Addr(), 3, uintptr(unsafe.Pointer(datasrc)), uintptr(userdata), uintptr(unsafe.Pointer(query)))
- return r0
- }
-
- pdhCloseQuery := func(query windows.Handle) (errno uintptr) {
- r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhCloseQuery").Addr(), 1, uintptr(query), 0, 0)
- return r0
- }
-
- var q windows.Handle
- name, err := windows.UTF16PtrFromString("no_such_source")
- if err != nil {
- t.Fatal(err)
- }
- errno := pdhOpenQuery(name, 0, &q)
- if errno == 0 {
- pdhCloseQuery(q)
- t.Fatal("PdhOpenQuery succeeded, but expected to fail.")
- }
+ dll := windows.MustLoadDLL("netevent.dll")
+ const TITLE_SC_MESSAGE_BOX uint32 = 0xC0001B75
const flags uint32 = syscall.FORMAT_MESSAGE_FROM_HMODULE | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS
buf := make([]uint16, 300)
- _, err = windows.FormatMessage(flags, uintptr(dll.Handle), uint32(errno), 0, buf, nil)
+ _, err := windows.FormatMessage(flags, uintptr(dll.Handle), TITLE_SC_MESSAGE_BOX, 0, buf, nil)
if err != nil {
- t.Fatalf("FormatMessage for handle=%x and errno=%x failed: %v", dll.Handle, errno, err)
+ t.Fatalf("FormatMessage for handle=%x and errno=%x failed: %v", dll.Handle, TITLE_SC_MESSAGE_BOX, err)
}
}
@@ -111,3 +92,134 @@ func TestTOKEN_ALL_ACCESS(t *testing.T) {
t.Errorf("TOKEN_ALL_ACCESS = %x, want 0xF01FF", windows.TOKEN_ALL_ACCESS)
}
}
+
+func TestCreateWellKnownSid(t *testing.T) {
+ sid, err := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
+ if err != nil {
+ t.Fatalf("Unable to create well known sid for administrators: %v", err)
+ }
+ sidStr, err := sid.String()
+ if err != nil {
+ t.Fatalf("Unable to convert sid into string: %v", err)
+ }
+ if sidStr != "S-1-5-32-544" {
+ t.Fatalf("Expecting administrators to be S-1-5-32-544, but found %s instead", sidStr)
+ }
+}
+
+func TestPseudoTokens(t *testing.T) {
+ version, err := windows.GetVersion()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ((version&0xffff)>>8)|((version&0xff)<<8) < 0x0602 {
+ return
+ }
+
+ realProcessToken, err := windows.OpenCurrentProcessToken()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer realProcessToken.Close()
+ realProcessUser, err := realProcessToken.GetTokenUser()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pseudoProcessToken := windows.GetCurrentProcessToken()
+ pseudoProcessUser, err := pseudoProcessToken.GetTokenUser()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !windows.EqualSid(realProcessUser.User.Sid, pseudoProcessUser.User.Sid) {
+ t.Fatal("The real process token does not have the same as the pseudo process token")
+ }
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ err = windows.RevertToSelf()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pseudoThreadToken := windows.GetCurrentThreadToken()
+ _, err = pseudoThreadToken.GetTokenUser()
+ if err != windows.ERROR_NO_TOKEN {
+ t.Fatal("Expected an empty thread token")
+ }
+ pseudoThreadEffectiveToken := windows.GetCurrentThreadEffectiveToken()
+ pseudoThreadEffectiveUser, err := pseudoThreadEffectiveToken.GetTokenUser()
+ if err != nil {
+ t.Fatal(nil)
+ }
+ if !windows.EqualSid(realProcessUser.User.Sid, pseudoThreadEffectiveUser.User.Sid) {
+ t.Fatal("The real process token does not have the same as the pseudo thread effective token, even though we aren't impersonating")
+ }
+
+ err = windows.ImpersonateSelf(windows.SecurityImpersonation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer windows.RevertToSelf()
+ pseudoThreadUser, err := pseudoThreadToken.GetTokenUser()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !windows.EqualSid(realProcessUser.User.Sid, pseudoThreadUser.User.Sid) {
+ t.Fatal("The real process token does not have the same as the pseudo thread token after impersonating self")
+ }
+}
+
+func TestGUID(t *testing.T) {
+ guid, err := windows.GenerateGUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if guid.Data1 == 0 && guid.Data2 == 0 && guid.Data3 == 0 && guid.Data4 == [8]byte{} {
+ t.Fatal("Got an all zero GUID, which is overwhelmingly unlikely")
+ }
+ want := fmt.Sprintf("{%08X-%04X-%04X-%04X-%012X}", guid.Data1, guid.Data2, guid.Data3, guid.Data4[:2], guid.Data4[2:])
+ got := guid.String()
+ if got != want {
+ t.Fatalf("String = %q; want %q", got, want)
+ }
+ guid2, err := windows.GUIDFromString(got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if guid2 != guid {
+ t.Fatalf("Did not parse string back to original GUID = %q; want %q", guid2, guid)
+ }
+ _, err = windows.GUIDFromString("not-a-real-guid")
+ if err != syscall.Errno(windows.CO_E_CLASSSTRING) {
+ t.Fatalf("Bad GUID string error = %v; want CO_E_CLASSSTRING", err)
+ }
+}
+
+func TestKnownFolderPath(t *testing.T) {
+ token, err := windows.OpenCurrentProcessToken()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer token.Close()
+ profileDir, err := token.GetUserProfileDirectory()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := filepath.Join(profileDir, "Desktop")
+ got, err := windows.KnownFolderPath(windows.FOLDERID_Desktop, windows.KF_FLAG_DEFAULT)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want != got {
+ t.Fatalf("Path = %q; want %q", got, want)
+ }
+}
+
+func TestRtlGetVersion(t *testing.T) {
+ version := windows.RtlGetVersion()
+ if version.MajorVersion < 6 {
+ t.Fatalf("MajorVersion = %d; want >= 6", version.MajorVersion)
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go
new file mode 100644
index 0000000..107f87c
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go
@@ -0,0 +1,195 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil_test
+
+// This file defines tests of PathEnclosingInterval.
+
+// TODO(adonovan): exhaustive tests that run over the whole input
+// tree, not just handcrafted examples.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// pathToString returns a string containing the concrete types of the
+// nodes in path.
+func pathToString(path []ast.Node) string {
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "[")
+ for i, n := range path {
+ if i > 0 {
+ fmt.Fprint(&buf, " ")
+ }
+ fmt.Fprint(&buf, strings.TrimPrefix(fmt.Sprintf("%T", n), "*ast."))
+ }
+ fmt.Fprint(&buf, "]")
+ return buf.String()
+}
+
+// findInterval parses input and returns the [start, end) positions of
+// the first occurrence of substr in input. f==nil indicates failure;
+// an error has already been reported in that case.
+//
+func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
+ f, err := parser.ParseFile(fset, "", input, 0)
+ if err != nil {
+ t.Errorf("parse error: %s", err)
+ return
+ }
+
+ i := strings.Index(input, substr)
+ if i < 0 {
+ t.Errorf("%q is not a substring of input", substr)
+ f = nil
+ return
+ }
+
+ filePos := fset.File(f.Package)
+ return f, filePos.Pos(i), filePos.Pos(i + len(substr))
+}
+
+// Common input for following tests.
+const input = `
+// Hello.
+package main
+import "fmt"
+func f() {}
+func main() {
+ z := (x + y) // add them
+ f() // NB: ExprStmt and its CallExpr have same Pos/End
+}
+`
+
+func TestPathEnclosingInterval_Exact(t *testing.T) {
+ // For the exact tests, we check that a substring is mapped to
+ // the canonical string for the node it denotes.
+ tests := []struct {
+ substr string // first occurrence of this string indicates interval
+ node string // complete text of expected containing node
+ }{
+ {"package",
+ input[11 : len(input)-1]},
+ {"\npack",
+ input[11 : len(input)-1]},
+ {"main",
+ "main"},
+ {"import",
+ "import \"fmt\""},
+ {"\"fmt\"",
+ "\"fmt\""},
+ {"\nfunc f() {}\n",
+ "func f() {}"},
+ {"x ",
+ "x"},
+ {" y",
+ "y"},
+ {"z",
+ "z"},
+ {" + ",
+ "x + y"},
+ {" :=",
+ "z := (x + y)"},
+ {"x + y",
+ "x + y"},
+ {"(x + y)",
+ "(x + y)"},
+ {" (x + y) ",
+ "(x + y)"},
+ {" (x + y) // add",
+ "(x + y)"},
+ {"func",
+ "func f() {}"},
+ {"func f() {}",
+ "func f() {}"},
+ {"\nfun",
+ "func f() {}"},
+ {" f",
+ "f"},
+ }
+ for _, test := range tests {
+ f, start, end := findInterval(t, new(token.FileSet), input, test.substr)
+ if f == nil {
+ continue
+ }
+
+ path, exact := astutil.PathEnclosingInterval(f, start, end)
+ if !exact {
+ t.Errorf("PathEnclosingInterval(%q) not exact", test.substr)
+ continue
+ }
+
+ if len(path) == 0 {
+ if test.node != "" {
+ t.Errorf("PathEnclosingInterval(%q).path: got [], want %q",
+ test.substr, test.node)
+ }
+ continue
+ }
+
+ if got := input[path[0].Pos():path[0].End()]; got != test.node {
+ t.Errorf("PathEnclosingInterval(%q): got %q, want %q (path was %s)",
+ test.substr, got, test.node, pathToString(path))
+ continue
+ }
+ }
+}
+
+func TestPathEnclosingInterval_Paths(t *testing.T) {
+ // For these tests, we check only the path of the enclosing
+ // node, but not its complete text because it's often quite
+ // large when !exact.
+ tests := []struct {
+ substr string // first occurrence of this string indicates interval
+ path string // the pathToString(),exact of the expected path
+ }{
+ {"// add",
+ "[BlockStmt FuncDecl File],false"},
+ {"(x + y",
+ "[ParenExpr AssignStmt BlockStmt FuncDecl File],false"},
+ {"x +",
+ "[BinaryExpr ParenExpr AssignStmt BlockStmt FuncDecl File],false"},
+ {"z := (x",
+ "[AssignStmt BlockStmt FuncDecl File],false"},
+ {"func f",
+ "[FuncDecl File],false"},
+ {"func f()",
+ "[FuncDecl File],false"},
+ {" f()",
+ "[FuncDecl File],false"},
+ {"() {}",
+ "[FuncDecl File],false"},
+ {"// Hello",
+ "[File],false"},
+ {" f",
+ "[Ident FuncDecl File],true"},
+ {"func ",
+ "[FuncDecl File],true"},
+ {"mai",
+ "[Ident File],true"},
+ {"f() // NB",
+ "[CallExpr ExprStmt BlockStmt FuncDecl File],true"},
+ }
+ for _, test := range tests {
+ f, start, end := findInterval(t, new(token.FileSet), input, test.substr)
+ if f == nil {
+ continue
+ }
+
+ path, exact := astutil.PathEnclosingInterval(f, start, end)
+ if got := fmt.Sprintf("%s,%v", pathToString(path), exact); got != test.path {
+ t.Errorf("PathEnclosingInterval(%q): got %q, want %q",
+ test.substr, got, test.path)
+ continue
+ }
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go b/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go
new file mode 100644
index 0000000..1d86e47
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go
@@ -0,0 +1,2087 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+import (
+ "bytes"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "reflect"
+ "strconv"
+ "testing"
+)
+
+var fset = token.NewFileSet()
+
+func parse(t *testing.T, name, in string) *ast.File {
+ file, err := parser.ParseFile(fset, name, in, parser.ParseComments)
+ if err != nil {
+ t.Fatalf("%s parse: %v", name, err)
+ }
+ return file
+}
+
+func print(t *testing.T, name string, f *ast.File) string {
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, f); err != nil {
+ t.Fatalf("%s gofmt: %v", name, err)
+ }
+ return buf.String()
+}
+
+type test struct {
+ name string
+ renamedPkg string
+ pkg string
+ in string
+ out string
+ unchanged bool // Expect added/deleted return value to be false.
+}
+
+var addTests = []test{
+ {
+ name: "leave os alone",
+ pkg: "os",
+ in: `package main
+
+import (
+ "os"
+)
+`,
+ out: `package main
+
+import (
+ "os"
+)
+`,
+ unchanged: true,
+ },
+ {
+ name: "import.1",
+ pkg: "os",
+ in: `package main
+`,
+ out: `package main
+
+import "os"
+`,
+ },
+ {
+ name: "import.2",
+ pkg: "os",
+ in: `package main
+
+// Comment
+import "C"
+`,
+ out: `package main
+
+// Comment
+import "C"
+import "os"
+`,
+ },
+ {
+ name: "import.3",
+ pkg: "os",
+ in: `package main
+
+// Comment
+import "C"
+
+import (
+ "io"
+ "utf8"
+)
+`,
+ out: `package main
+
+// Comment
+import "C"
+
+import (
+ "io"
+ "os"
+ "utf8"
+)
+`,
+ },
+ {
+ name: "import.17",
+ pkg: "x/y/z",
+ in: `package main
+
+// Comment
+import "C"
+
+import (
+ "a"
+ "b"
+
+ "x/w"
+
+ "d/f"
+)
+`,
+ out: `package main
+
+// Comment
+import "C"
+
+import (
+ "a"
+ "b"
+
+ "x/w"
+ "x/y/z"
+
+ "d/f"
+)
+`,
+ },
+ {
+ name: "issue #19190",
+ pkg: "x.org/y/z",
+ in: `package main
+
+// Comment
+import "C"
+
+import (
+ "bytes"
+ "os"
+
+ "d.com/f"
+)
+`,
+ out: `package main
+
+// Comment
+import "C"
+
+import (
+ "bytes"
+ "os"
+
+ "d.com/f"
+ "x.org/y/z"
+)
+`,
+ },
+ {
+ name: "issue #19190 with existing grouped import packages",
+ pkg: "x.org/y/z",
+ in: `package main
+
+// Comment
+import "C"
+
+import (
+ "bytes"
+ "os"
+
+ "c.com/f"
+ "d.com/f"
+
+ "y.com/a"
+ "y.com/b"
+ "y.com/c"
+)
+`,
+ out: `package main
+
+// Comment
+import "C"
+
+import (
+ "bytes"
+ "os"
+
+ "c.com/f"
+ "d.com/f"
+ "x.org/y/z"
+
+ "y.com/a"
+ "y.com/b"
+ "y.com/c"
+)
+`,
+ },
+ {
+ name: "issue #19190 - match score is still respected",
+ pkg: "y.org/c",
+ in: `package main
+
+import (
+ "x.org/a"
+
+ "y.org/b"
+)
+`,
+ out: `package main
+
+import (
+ "x.org/a"
+
+ "y.org/b"
+ "y.org/c"
+)
+`,
+ },
+ {
+ name: "import into singular group",
+ pkg: "bytes",
+ in: `package main
+
+import "os"
+
+`,
+ out: `package main
+
+import (
+ "bytes"
+ "os"
+)
+`,
+ },
+ {
+ name: "import into singular group with comment",
+ pkg: "bytes",
+ in: `package main
+
+import /* why */ /* comment here? */ "os"
+
+`,
+ out: `package main
+
+import /* why */ /* comment here? */ (
+ "bytes"
+ "os"
+)
+`,
+ },
+ {
+ name: "import into group with leading comment",
+ pkg: "strings",
+ in: `package main
+
+import (
+ // comment before bytes
+ "bytes"
+ "os"
+)
+
+`,
+ out: `package main
+
+import (
+ // comment before bytes
+ "bytes"
+ "os"
+ "strings"
+)
+`,
+ },
+ {
+ name: "",
+ renamedPkg: "fmtpkg",
+ pkg: "fmt",
+ in: `package main
+
+import "os"
+
+`,
+ out: `package main
+
+import (
+ fmtpkg "fmt"
+ "os"
+)
+`,
+ },
+ {
+ name: "struct comment",
+ pkg: "time",
+ in: `package main
+
+// This is a comment before a struct.
+type T struct {
+ t time.Time
+}
+`,
+ out: `package main
+
+import "time"
+
+// This is a comment before a struct.
+type T struct {
+ t time.Time
+}
+`,
+ },
+ {
+ name: "issue 8729 import C",
+ pkg: "time",
+ in: `package main
+
+import "C"
+
+// comment
+type T time.Time
+`,
+ out: `package main
+
+import "C"
+import "time"
+
+// comment
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 empty import",
+ pkg: "time",
+ in: `package main
+
+import ()
+
+// comment
+type T time.Time
+`,
+ out: `package main
+
+import "time"
+
+// comment
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 comment on package line",
+ pkg: "time",
+ in: `package main // comment
+
+type T time.Time
+`,
+ out: `package main // comment
+
+import "time"
+
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 comment after package",
+ pkg: "time",
+ in: `package main
+// comment
+
+type T time.Time
+`,
+ out: `package main
+
+import "time"
+
+// comment
+
+type T time.Time
+`,
+ },
+ {
+ name: "issue 8729 comment before and on package line",
+ pkg: "time",
+ in: `// comment before
+package main // comment on
+
+type T time.Time
+`,
+ out: `// comment before
+package main // comment on
+
+import "time"
+
+type T time.Time
+`,
+ },
+
+ // Issue 9961: Match prefixes using path segments rather than bytes
+ {
+ name: "issue 9961",
+ pkg: "regexp",
+ in: `package main
+
+import (
+ "flag"
+ "testing"
+
+ "rsc.io/p"
+)
+`,
+ out: `package main
+
+import (
+ "flag"
+ "regexp"
+ "testing"
+
+ "rsc.io/p"
+)
+`,
+ },
+ // Issue 10337: Preserve comment position
+ {
+ name: "issue 10337",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "bytes" // a
+ "log" // c
+)
+`,
+ out: `package main
+
+import (
+ "bytes" // a
+ "fmt"
+ "log" // c
+)
+`,
+ },
+ {
+ name: "issue 10337 new import at the start",
+ pkg: "bytes",
+ in: `package main
+
+import (
+ "fmt" // b
+ "log" // c
+)
+`,
+ out: `package main
+
+import (
+ "bytes"
+ "fmt" // b
+ "log" // c
+)
+`,
+ },
+ {
+ name: "issue 10337 new import at the end",
+ pkg: "log",
+ in: `package main
+
+import (
+ "bytes" // a
+ "fmt" // b
+)
+`,
+ out: `package main
+
+import (
+ "bytes" // a
+ "fmt" // b
+ "log"
+)
+`,
+ },
+ // Issue 14075: Merge import declarations
+ {
+ name: "issue 14075",
+ pkg: "bufio",
+ in: `package main
+
+import "bytes"
+import "fmt"
+`,
+ out: `package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+)
+`,
+ },
+ {
+ name: "issue 14075 update position",
+ pkg: "bufio",
+ in: `package main
+
+import "bytes"
+import (
+ "fmt"
+)
+`,
+ out: `package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+)
+`,
+ },
+ {
+ name: `issue 14075 ignore import "C"`,
+ pkg: "bufio",
+ in: `package main
+
+// Comment
+import "C"
+
+import "bytes"
+import "fmt"
+`,
+ out: `package main
+
+// Comment
+import "C"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+)
+`,
+ },
+ {
+ name: `issue 14075 ignore adjacent import "C"`,
+ pkg: "bufio",
+ in: `package main
+
+// Comment
+import "C"
+import "fmt"
+`,
+ out: `package main
+
+// Comment
+import "C"
+import (
+ "bufio"
+ "fmt"
+)
+`,
+ },
+ {
+ name: `issue 14075 ignore adjacent import "C" (without factored import)`,
+ pkg: "bufio",
+ in: `package main
+
+// Comment
+import "C"
+import "fmt"
+`,
+ out: `package main
+
+// Comment
+import "C"
+import (
+ "bufio"
+ "fmt"
+)
+`,
+ },
+ {
+ name: `issue 14075 ignore single import "C"`,
+ pkg: "bufio",
+ in: `package main
+
+// Comment
+import "C"
+`,
+ out: `package main
+
+// Comment
+import "C"
+import "bufio"
+`,
+ },
+ {
+ name: `issue 17212 several single-import lines with shared prefix ending in a slash`,
+ pkg: "net/http",
+ in: `package main
+
+import "bufio"
+import "net/url"
+`,
+ out: `package main
+
+import (
+ "bufio"
+ "net/http"
+ "net/url"
+)
+`,
+ },
+ {
+ name: `issue 17212 block imports lines with shared prefix ending in a slash`,
+ pkg: "net/http",
+ in: `package main
+
+import (
+ "bufio"
+ "net/url"
+)
+`,
+ out: `package main
+
+import (
+ "bufio"
+ "net/http"
+ "net/url"
+)
+`,
+ },
+ {
+ name: `issue 17213 many single-import lines`,
+ pkg: "fmt",
+ in: `package main
+
+import "bufio"
+import "bytes"
+import "errors"
+`,
+ out: `package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+)
+`,
+ },
+
+ // Issue 28605: Add specified import, even if that import path is imported under another name
+ {
+ name: "issue 28605 add unnamed path",
+ renamedPkg: "",
+ pkg: "path",
+ in: `package main
+
+import (
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ },
+ {
+ name: "issue 28605 add pathpkg-renamed path",
+ renamedPkg: "pathpkg",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ },
+ {
+ name: "issue 28605 add blank identifier path",
+ renamedPkg: "_",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ . "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ },
+ {
+ name: "issue 28605 add dot import path",
+ renamedPkg: ".",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ },
+
+ {
+ name: "duplicate import declarations, add existing one",
+ renamedPkg: "f",
+ pkg: "fmt",
+ in: `package main
+
+import "fmt"
+import "fmt"
+import f "fmt"
+import f "fmt"
+`,
+ out: `package main
+
+import "fmt"
+import "fmt"
+import f "fmt"
+import f "fmt"
+`,
+ unchanged: true,
+ },
+}
+
+func TestAddImport(t *testing.T) {
+ for _, test := range addTests {
+ file := parse(t, test.name, test.in)
+ var before bytes.Buffer
+ ast.Fprint(&before, fset, file, nil)
+ added := AddNamedImport(fset, file, test.renamedPkg, test.pkg)
+ if got := print(t, test.name, file); got != test.out {
+ t.Errorf("first run: %s:\ngot: %s\nwant: %s", test.name, got, test.out)
+ var after bytes.Buffer
+ ast.Fprint(&after, fset, file, nil)
+ t.Logf("AST before:\n%s\nAST after:\n%s\n", before.String(), after.String())
+ }
+ if got, want := added, !test.unchanged; got != want {
+ t.Errorf("first run: %s: added = %v, want %v", test.name, got, want)
+ }
+
+ // AddNamedImport should be idempotent. Verify that by calling it again,
+ // expecting no change to the AST, and the returned added value to always be false.
+ added = AddNamedImport(fset, file, test.renamedPkg, test.pkg)
+ if got := print(t, test.name, file); got != test.out {
+ t.Errorf("second run: %s:\ngot: %s\nwant: %s", test.name, got, test.out)
+ }
+ if got, want := added, false; got != want {
+ t.Errorf("second run: %s: added = %v, want %v", test.name, got, want)
+ }
+ }
+}
+
+func TestDoubleAddImport(t *testing.T) {
+ file := parse(t, "doubleimport", "package main\n")
+ AddImport(fset, file, "os")
+ AddImport(fset, file, "bytes")
+ want := `package main
+
+import (
+ "bytes"
+ "os"
+)
+`
+ if got := print(t, "doubleimport", file); got != want {
+ t.Errorf("got: %s\nwant: %s", got, want)
+ }
+}
+
+func TestDoubleAddNamedImport(t *testing.T) {
+ file := parse(t, "doublenamedimport", "package main\n")
+ AddNamedImport(fset, file, "o", "os")
+ AddNamedImport(fset, file, "i", "io")
+ want := `package main
+
+import (
+ i "io"
+ o "os"
+)
+`
+ if got := print(t, "doublenamedimport", file); got != want {
+ t.Errorf("got: %s\nwant: %s", got, want)
+ }
+}
+
+// Part of issue 8729.
+func TestDoubleAddImportWithDeclComment(t *testing.T) {
+ file := parse(t, "doubleimport", `package main
+
+import (
+)
+
+// comment
+type I int
+`)
+ // The AddImport order here matters.
+ AddImport(fset, file, "golang.org/x/tools/go/ast/astutil")
+ AddImport(fset, file, "os")
+ want := `package main
+
+import (
+ "golang.org/x/tools/go/ast/astutil"
+ "os"
+)
+
+// comment
+type I int
+`
+ if got := print(t, "doubleimport_with_decl_comment", file); got != want {
+ t.Errorf("got: %s\nwant: %s", got, want)
+ }
+}
+
+var deleteTests = []test{
+ {
+ name: "import.4",
+ pkg: "os",
+ in: `package main
+
+import (
+ "os"
+)
+`,
+ out: `package main
+`,
+ },
+ {
+ name: "import.5",
+ pkg: "os",
+ in: `package main
+
+// Comment
+import "C"
+import "os"
+`,
+ out: `package main
+
+// Comment
+import "C"
+`,
+ },
+ {
+ name: "import.6",
+ pkg: "os",
+ in: `package main
+
+// Comment
+import "C"
+
+import (
+ "io"
+ "os"
+ "utf8"
+)
+`,
+ out: `package main
+
+// Comment
+import "C"
+
+import (
+ "io"
+ "utf8"
+)
+`,
+ },
+ {
+ name: "import.7",
+ pkg: "io",
+ in: `package main
+
+import (
+ "io" // a
+ "os" // b
+ "utf8" // c
+)
+`,
+ out: `package main
+
+import (
+ // a
+ "os" // b
+ "utf8" // c
+)
+`,
+ },
+ {
+ name: "import.8",
+ pkg: "os",
+ in: `package main
+
+import (
+ "io" // a
+ "os" // b
+ "utf8" // c
+)
+`,
+ out: `package main
+
+import (
+ "io" // a
+ // b
+ "utf8" // c
+)
+`,
+ },
+ {
+ name: "import.9",
+ pkg: "utf8",
+ in: `package main
+
+import (
+ "io" // a
+ "os" // b
+ "utf8" // c
+)
+`,
+ out: `package main
+
+import (
+ "io" // a
+ "os" // b
+ // c
+)
+`,
+ },
+ {
+ name: "import.10",
+ pkg: "io",
+ in: `package main
+
+import (
+ "io"
+ "os"
+ "utf8"
+)
+`,
+ out: `package main
+
+import (
+ "os"
+ "utf8"
+)
+`,
+ },
+ {
+ name: "import.11",
+ pkg: "os",
+ in: `package main
+
+import (
+ "io"
+ "os"
+ "utf8"
+)
+`,
+ out: `package main
+
+import (
+ "io"
+ "utf8"
+)
+`,
+ },
+ {
+ name: "import.12",
+ pkg: "utf8",
+ in: `package main
+
+import (
+ "io"
+ "os"
+ "utf8"
+)
+`,
+ out: `package main
+
+import (
+ "io"
+ "os"
+)
+`,
+ },
+ {
+ name: "handle.raw.quote.imports",
+ pkg: "os",
+ in: "package main\n\nimport `os`",
+ out: `package main
+`,
+ },
+ {
+ name: "import.13",
+ pkg: "io",
+ in: `package main
+
+import (
+ "fmt"
+
+ "io"
+ "os"
+ "utf8"
+
+ "go/format"
+)
+`,
+ out: `package main
+
+import (
+ "fmt"
+
+ "os"
+ "utf8"
+
+ "go/format"
+)
+`,
+ },
+ {
+ name: "import.14",
+ pkg: "io",
+ in: `package main
+
+import (
+ "fmt" // a
+
+ "io" // b
+ "os" // c
+ "utf8" // d
+
+ "go/format" // e
+)
+`,
+ out: `package main
+
+import (
+ "fmt" // a
+
+ // b
+ "os" // c
+ "utf8" // d
+
+ "go/format" // e
+)
+`,
+ },
+ {
+ name: "import.15",
+ pkg: "double",
+ in: `package main
+
+import (
+ "double"
+ "double"
+)
+`,
+ out: `package main
+`,
+ },
+ {
+ name: "import.16",
+ pkg: "bubble",
+ in: `package main
+
+import (
+ "toil"
+ "bubble"
+ "bubble"
+ "trouble"
+)
+`,
+ out: `package main
+
+import (
+ "toil"
+ "trouble"
+)
+`,
+ },
+ {
+ name: "import.17",
+ pkg: "quad",
+ in: `package main
+
+import (
+ "quad"
+ "quad"
+)
+
+import (
+ "quad"
+ "quad"
+)
+`,
+ out: `package main
+`,
+ },
+ {
+ name: "import.18",
+ renamedPkg: "x",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt"
+ x "fmt"
+)
+`,
+ out: `package main
+
+import (
+ "fmt"
+)
+`,
+ },
+ {
+ name: "import.18",
+ renamedPkg: "x",
+ pkg: "fmt",
+ in: `package main
+
+import x "fmt"
+import y "fmt"
+`,
+ out: `package main
+
+import y "fmt"
+`,
+ },
+ // Issue #15432, #18051
+ {
+ name: "import.19",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt"
+
+ // Some comment.
+ "io"
+)`,
+ out: `package main
+
+import (
+ // Some comment.
+ "io"
+)
+`,
+ },
+ {
+ name: "import.20",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt"
+
+ // Some
+ // comment.
+ "io"
+)`,
+ out: `package main
+
+import (
+ // Some
+ // comment.
+ "io"
+)
+`,
+ },
+ {
+ name: "import.21",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt"
+
+ /*
+ Some
+ comment.
+ */
+ "io"
+)`,
+ out: `package main
+
+import (
+ /*
+ Some
+ comment.
+ */
+ "io"
+)
+`,
+ },
+ {
+ name: "import.22",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ /* Some */
+ // comment.
+ "io"
+ "fmt"
+)`,
+ out: `package main
+
+import (
+ /* Some */
+ // comment.
+ "io"
+)
+`,
+ },
+ {
+ name: "import.23",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ // comment 1
+ "fmt"
+ // comment 2
+ "io"
+)`,
+ out: `package main
+
+import (
+ // comment 2
+ "io"
+)
+`,
+ },
+ {
+ name: "import.24",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt" // comment 1
+ "io" // comment 2
+)`,
+ out: `package main
+
+import (
+ "io" // comment 2
+)
+`,
+ },
+ {
+ name: "import.25",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt"
+ /* comment */ "io"
+)`,
+ out: `package main
+
+import (
+ /* comment */ "io"
+)
+`,
+ },
+ {
+ name: "import.26",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt"
+ "io" /* comment */
+)`,
+ out: `package main
+
+import (
+ "io" /* comment */
+)
+`,
+ },
+ {
+ name: "import.27",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ "fmt" /* comment */
+ "io"
+)`,
+ out: `package main
+
+import (
+ "io"
+)
+`,
+ },
+ {
+ name: "import.28",
+ pkg: "fmt",
+ in: `package main
+
+import (
+ /* comment */ "fmt"
+ "io"
+)`,
+ out: `package main
+
+import (
+ "io"
+)
+`,
+ },
+ {
+ name: "import.29",
+ pkg: "fmt",
+ in: `package main
+
+// comment 1
+import (
+ "fmt"
+ "io" // comment 2
+)`,
+ out: `package main
+
+// comment 1
+import (
+ "io" // comment 2
+)
+`,
+ },
+ {
+ name: "import.30",
+ pkg: "fmt",
+ in: `package main
+
+// comment 1
+import (
+ "fmt" // comment 2
+ "io"
+)`,
+ out: `package main
+
+// comment 1
+import (
+ "io"
+)
+`,
+ },
+ {
+ name: "import.31",
+ pkg: "fmt",
+ in: `package main
+
+// comment 1
+import (
+ "fmt"
+ /* comment 2 */ "io"
+)`,
+ out: `package main
+
+// comment 1
+import (
+ /* comment 2 */ "io"
+)
+`,
+ },
+ {
+ name: "import.32",
+ pkg: "fmt",
+ renamedPkg: "f",
+ in: `package main
+
+// comment 1
+import (
+ f "fmt"
+ /* comment 2 */ i "io"
+)`,
+ out: `package main
+
+// comment 1
+import (
+ /* comment 2 */ i "io"
+)
+`,
+ },
+ {
+ name: "import.33",
+ pkg: "fmt",
+ renamedPkg: "f",
+ in: `package main
+
+// comment 1
+import (
+ /* comment 2 */ f "fmt"
+ i "io"
+)`,
+ out: `package main
+
+// comment 1
+import (
+ i "io"
+)
+`,
+ },
+ {
+ name: "import.34",
+ pkg: "fmt",
+ renamedPkg: "f",
+ in: `package main
+
+// comment 1
+import (
+ f "fmt" /* comment 2 */
+ i "io"
+)`,
+ out: `package main
+
+// comment 1
+import (
+ i "io"
+)
+`,
+ },
+ {
+ name: "import.35",
+ pkg: "fmt",
+ in: `package main
+
+// comment 1
+import (
+ "fmt"
+ // comment 2
+ "io"
+)`,
+ out: `package main
+
+// comment 1
+import (
+ // comment 2
+ "io"
+)
+`,
+ },
+ {
+ name: "import.36",
+ pkg: "fmt",
+ in: `package main
+
+/* comment 1 */
+import (
+ "fmt"
+ /* comment 2 */
+ "io"
+)`,
+ out: `package main
+
+/* comment 1 */
+import (
+ /* comment 2 */
+ "io"
+)
+`,
+ },
+
+ // Issue 20229: MergeLine panic on weird input
+ {
+ name: "import.37",
+ pkg: "io",
+ in: `package main
+import("_"
+"io")`,
+ out: `package main
+
+import (
+ "_"
+)
+`,
+ },
+
+ // Issue 28605: Delete specified import, even if that import path is imported under another name
+ {
+ name: "import.38",
+ renamedPkg: "",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ },
+ {
+ name: "import.39",
+ renamedPkg: "pathpkg",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+)
+`,
+ },
+ {
+ name: "import.40",
+ renamedPkg: "_",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ . "path"
+ pathpkg "path"
+)
+`,
+ },
+ {
+ name: "import.41",
+ renamedPkg: ".",
+ pkg: "path",
+ in: `package main
+
+import (
+ "path"
+ . "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ out: `package main
+
+import (
+ "path"
+ _ "path"
+ pathpkg "path"
+)
+`,
+ },
+
+ // Duplicate import declarations, all matching ones are deleted.
+ {
+ name: "import.42",
+ renamedPkg: "f",
+ pkg: "fmt",
+ in: `package main
+
+import "fmt"
+import "fmt"
+import f "fmt"
+import f "fmt"
+`,
+ out: `package main
+
+import "fmt"
+import "fmt"
+`,
+ },
+ {
+ name: "import.43",
+ renamedPkg: "x",
+ pkg: "fmt",
+ in: `package main
+
+import "fmt"
+import "fmt"
+import f "fmt"
+import f "fmt"
+`,
+ out: `package main
+
+import "fmt"
+import "fmt"
+import f "fmt"
+import f "fmt"
+`,
+ unchanged: true,
+ },
+}
+
+func TestDeleteImport(t *testing.T) {
+ for _, test := range deleteTests {
+ file := parse(t, test.name, test.in)
+ var before bytes.Buffer
+ ast.Fprint(&before, fset, file, nil)
+ deleted := DeleteNamedImport(fset, file, test.renamedPkg, test.pkg)
+ if got := print(t, test.name, file); got != test.out {
+ t.Errorf("first run: %s:\ngot: %s\nwant: %s", test.name, got, test.out)
+ var after bytes.Buffer
+ ast.Fprint(&after, fset, file, nil)
+ t.Logf("AST before:\n%s\nAST after:\n%s\n", before.String(), after.String())
+ }
+ if got, want := deleted, !test.unchanged; got != want {
+ t.Errorf("first run: %s: deleted = %v, want %v", test.name, got, want)
+ }
+
+ // DeleteNamedImport should be idempotent. Verify that by calling it again,
+ // expecting no change to the AST, and the returned deleted value to always be false.
+ deleted = DeleteNamedImport(fset, file, test.renamedPkg, test.pkg)
+ if got := print(t, test.name, file); got != test.out {
+ t.Errorf("second run: %s:\ngot: %s\nwant: %s", test.name, got, test.out)
+ }
+ if got, want := deleted, false; got != want {
+ t.Errorf("second run: %s: deleted = %v, want %v", test.name, got, want)
+ }
+ }
+}
+
+type rewriteTest struct {
+ name string
+ srcPkg string
+ dstPkg string
+ in string
+ out string
+}
+
+var rewriteTests = []rewriteTest{
+ {
+ name: "import.13",
+ srcPkg: "utf8",
+ dstPkg: "encoding/utf8",
+ in: `package main
+
+import (
+ "io"
+ "os"
+ "utf8" // thanks ken
+)
+`,
+ out: `package main
+
+import (
+ "encoding/utf8" // thanks ken
+ "io"
+ "os"
+)
+`,
+ },
+ {
+ name: "import.14",
+ srcPkg: "asn1",
+ dstPkg: "encoding/asn1",
+ in: `package main
+
+import (
+ "asn1"
+ "crypto"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "time"
+)
+
+var x = 1
+`,
+ out: `package main
+
+import (
+ "crypto"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "time"
+)
+
+var x = 1
+`,
+ },
+ {
+ name: "import.15",
+ srcPkg: "url",
+ dstPkg: "net/url",
+ in: `package main
+
+import (
+ "bufio"
+ "net"
+ "path"
+ "url"
+)
+
+var x = 1 // comment on x, not on url
+`,
+ out: `package main
+
+import (
+ "bufio"
+ "net"
+ "net/url"
+ "path"
+)
+
+var x = 1 // comment on x, not on url
+`,
+ },
+ {
+ name: "import.16",
+ srcPkg: "http",
+ dstPkg: "net/http",
+ in: `package main
+
+import (
+ "flag"
+ "http"
+ "log"
+ "text/template"
+)
+
+var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18
+`,
+ out: `package main
+
+import (
+ "flag"
+ "log"
+ "net/http"
+ "text/template"
+)
+
+var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18
+`,
+ },
+}
+
+func TestRewriteImport(t *testing.T) {
+ for _, test := range rewriteTests {
+ file := parse(t, test.name, test.in)
+ RewriteImport(fset, file, test.srcPkg, test.dstPkg)
+ if got := print(t, test.name, file); got != test.out {
+ t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out)
+ }
+ }
+}
+
+var importsTests = []struct {
+ name string
+ in string
+ want [][]string
+}{
+ {
+ name: "no packages",
+ in: `package foo
+`,
+ want: nil,
+ },
+ {
+ name: "one group",
+ in: `package foo
+
+import (
+ "fmt"
+ "testing"
+)
+`,
+ want: [][]string{{"fmt", "testing"}},
+ },
+ {
+ name: "four groups",
+ in: `package foo
+
+import "C"
+import (
+ "fmt"
+ "testing"
+
+ "appengine"
+
+ "myproject/mylib1"
+ "myproject/mylib2"
+)
+`,
+ want: [][]string{
+ {"C"},
+ {"fmt", "testing"},
+ {"appengine"},
+ {"myproject/mylib1", "myproject/mylib2"},
+ },
+ },
+ {
+ name: "multiple factored groups",
+ in: `package foo
+
+import (
+ "fmt"
+ "testing"
+
+ "appengine"
+)
+import (
+ "reflect"
+
+ "bytes"
+)
+`,
+ want: [][]string{
+ {"fmt", "testing"},
+ {"appengine"},
+ {"reflect"},
+ {"bytes"},
+ },
+ },
+}
+
+func unquote(s string) string {
+ res, err := strconv.Unquote(s)
+ if err != nil {
+ return "could_not_unquote"
+ }
+ return res
+}
+
+func TestImports(t *testing.T) {
+ fset := token.NewFileSet()
+ for _, test := range importsTests {
+ f, err := parser.ParseFile(fset, "test.go", test.in, 0)
+ if err != nil {
+ t.Errorf("%s: %v", test.name, err)
+ continue
+ }
+ var got [][]string
+ for _, group := range Imports(fset, f) {
+ var b []string
+ for _, spec := range group {
+ b = append(b, unquote(spec.Path.Value))
+ }
+ got = append(got, b)
+ }
+ if !reflect.DeepEqual(got, test.want) {
+ t.Errorf("Imports(%s)=%v, want %v", test.name, got, test.want)
+ }
+ }
+}
+
+var usesImportTests = []struct {
+ name string
+ path string
+ in string
+ want bool
+}{
+ {
+ name: "no packages",
+ path: "io",
+ in: `package foo
+`,
+ want: false,
+ },
+ {
+ name: "import.1",
+ path: "io",
+ in: `package foo
+
+import "io"
+
+var _ io.Writer
+`,
+ want: true,
+ },
+ {
+ name: "import.2",
+ path: "io",
+ in: `package foo
+
+import "io"
+`,
+ want: false,
+ },
+ {
+ name: "import.3",
+ path: "io",
+ in: `package foo
+
+import "io"
+
+var io = 42
+`,
+ want: false,
+ },
+ {
+ name: "import.4",
+ path: "io",
+ in: `package foo
+
+import i "io"
+
+var _ i.Writer
+`,
+ want: true,
+ },
+ {
+ name: "import.5",
+ path: "io",
+ in: `package foo
+
+import i "io"
+`,
+ want: false,
+ },
+ {
+ name: "import.6",
+ path: "io",
+ in: `package foo
+
+import i "io"
+
+var i = 42
+var io = 42
+`,
+ want: false,
+ },
+ {
+ name: "import.7",
+ path: "encoding/json",
+ in: `package foo
+
+import "encoding/json"
+
+var _ json.Encoder
+`,
+ want: true,
+ },
+ {
+ name: "import.8",
+ path: "encoding/json",
+ in: `package foo
+
+import "encoding/json"
+`,
+ want: false,
+ },
+ {
+ name: "import.9",
+ path: "encoding/json",
+ in: `package foo
+
+import "encoding/json"
+
+var json = 42
+`,
+ want: false,
+ },
+ {
+ name: "import.10",
+ path: "encoding/json",
+ in: `package foo
+
+import j "encoding/json"
+
+var _ j.Encoder
+`,
+ want: true,
+ },
+ {
+ name: "import.11",
+ path: "encoding/json",
+ in: `package foo
+
+import j "encoding/json"
+`,
+ want: false,
+ },
+ {
+ name: "import.12",
+ path: "encoding/json",
+ in: `package foo
+
+import j "encoding/json"
+
+var j = 42
+var json = 42
+`,
+ want: false,
+ },
+ {
+ name: "import.13",
+ path: "io",
+ in: `package foo
+
+import _ "io"
+`,
+ want: true,
+ },
+ {
+ name: "import.14",
+ path: "io",
+ in: `package foo
+
+import . "io"
+`,
+ want: true,
+ },
+}
+
+func TestUsesImport(t *testing.T) {
+ fset := token.NewFileSet()
+ for _, test := range usesImportTests {
+ f, err := parser.ParseFile(fset, "test.go", test.in, 0)
+ if err != nil {
+ t.Errorf("%s: %v", test.name, err)
+ continue
+ }
+ got := UsesImport(f, test.path)
+ if got != test.want {
+ t.Errorf("UsesImport(%s)=%v, want %v", test.name, got, test.want)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go
new file mode 100644
index 0000000..1c86970
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go
@@ -0,0 +1,248 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil_test
+
+import (
+ "bytes"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "testing"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+var rewriteTests = [...]struct {
+ name string
+ orig, want string
+ pre, post astutil.ApplyFunc
+}{
+ {name: "nop", orig: "package p\n", want: "package p\n"},
+
+ {name: "replace",
+ orig: `package p
+
+var x int
+`,
+ want: `package p
+
+var t T
+`,
+ post: func(c *astutil.Cursor) bool {
+ if _, ok := c.Node().(*ast.ValueSpec); ok {
+ c.Replace(valspec("t", "T"))
+ return false
+ }
+ return true
+ },
+ },
+
+ {name: "set doc strings",
+ orig: `package p
+
+const z = 0
+
+type T struct{}
+
+var x int
+`,
+ want: `package p
+// a foo is a foo
+const z = 0
+// a foo is a foo
+type T struct{}
+// a foo is a foo
+var x int
+`,
+ post: func(c *astutil.Cursor) bool {
+ if _, ok := c.Parent().(*ast.GenDecl); ok && c.Name() == "Doc" && c.Node() == nil {
+ c.Replace(&ast.CommentGroup{List: []*ast.Comment{{Text: "// a foo is a foo"}}})
+ }
+ return true
+ },
+ },
+
+ {name: "insert names",
+ orig: `package p
+
+const a = 1
+`,
+ want: `package p
+
+const a, b, c = 1, 2, 3
+`,
+ pre: func(c *astutil.Cursor) bool {
+ if _, ok := c.Parent().(*ast.ValueSpec); ok {
+ switch c.Name() {
+ case "Names":
+ c.InsertAfter(ast.NewIdent("c"))
+ c.InsertAfter(ast.NewIdent("b"))
+ case "Values":
+ c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "3"})
+ c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "2"})
+ }
+ }
+ return true
+ },
+ },
+
+ {name: "insert",
+ orig: `package p
+
+var (
+ x int
+ y int
+)
+`,
+ want: `package p
+
+var before1 int
+var before2 int
+
+var (
+ x int
+ y int
+)
+var after2 int
+var after1 int
+`,
+ pre: func(c *astutil.Cursor) bool {
+ if _, ok := c.Node().(*ast.GenDecl); ok {
+ c.InsertBefore(vardecl("before1", "int"))
+ c.InsertAfter(vardecl("after1", "int"))
+ c.InsertAfter(vardecl("after2", "int"))
+ c.InsertBefore(vardecl("before2", "int"))
+ }
+ return true
+ },
+ },
+
+ {name: "delete",
+ orig: `package p
+
+var x int
+var y int
+var z int
+`,
+ want: `package p
+
+var y int
+var z int
+`,
+ pre: func(c *astutil.Cursor) bool {
+ n := c.Node()
+ if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" {
+ c.Delete()
+ }
+ return true
+ },
+ },
+
+ {name: "insertafter-delete",
+ orig: `package p
+
+var x int
+var y int
+var z int
+`,
+ want: `package p
+
+var x1 int
+
+var y int
+var z int
+`,
+ pre: func(c *astutil.Cursor) bool {
+ n := c.Node()
+ if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" {
+ c.InsertAfter(vardecl("x1", "int"))
+ c.Delete()
+ }
+ return true
+ },
+ },
+
+ {name: "delete-insertafter",
+ orig: `package p
+
+var x int
+var y int
+var z int
+`,
+ want: `package p
+
+var y int
+var x1 int
+var z int
+`,
+ pre: func(c *astutil.Cursor) bool {
+ n := c.Node()
+ if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" {
+ c.Delete()
+ // The cursor is now effectively atop the 'var y int' node.
+ c.InsertAfter(vardecl("x1", "int"))
+ }
+ return true
+ },
+ },
+}
+
+func valspec(name, typ string) *ast.ValueSpec {
+ return &ast.ValueSpec{Names: []*ast.Ident{ast.NewIdent(name)},
+ Type: ast.NewIdent(typ),
+ }
+}
+
+func vardecl(name, typ string) *ast.GenDecl {
+ return &ast.GenDecl{
+ Tok: token.VAR,
+ Specs: []ast.Spec{valspec(name, typ)},
+ }
+}
+
+func TestRewrite(t *testing.T) {
+ t.Run("*", func(t *testing.T) {
+ for _, test := range rewriteTests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ n := astutil.Apply(f, test.pre, test.post)
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, n); err != nil {
+ t.Fatal(err)
+ }
+ got := buf.String()
+ if got != test.want {
+ t.Errorf("got:\n\n%s\nwant:\n\n%s\n", got, test.want)
+ }
+ })
+ }
+ })
+}
+
+var sink ast.Node
+
+func BenchmarkRewrite(b *testing.B) {
+ for _, test := range rewriteTests {
+ b.Run(test.name, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+ sink = astutil.Apply(f, test.pre, test.post)
+ }
+ })
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go b/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go
index 1815512..ccdc31b 100644
--- a/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go
+++ b/vendor/golang.org/x/tools/go/buildutil/allpackages_test.go
@@ -16,6 +16,7 @@ import (
"testing"
"golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/packages/packagestest"
)
func TestAllPackages(t *testing.T) {
@@ -23,7 +24,24 @@ func TestAllPackages(t *testing.T) {
t.Skip("gccgo has no standard packages")
}
- all := buildutil.AllPackages(&build.Default)
+ exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{
+ {Name: "golang.org/x/tools/go/buildutil", Files: packagestest.MustCopyFileTree(".")}})
+ defer exported.Cleanup()
+
+ var gopath string
+ for _, env := range exported.Config.Env {
+ if !strings.HasPrefix(env, "GOPATH=") {
+ continue
+ }
+ gopath = strings.TrimPrefix(env, "GOPATH=")
+ }
+ if gopath == "" {
+ t.Fatal("Failed to fish GOPATH out of env: ", exported.Config.Env)
+ }
+
+ var buildContext = build.Default
+ buildContext.GOPATH = gopath
+ all := buildutil.AllPackages(&buildContext)
set := make(map[string]bool)
for _, pkg := range all {
diff --git a/vendor/golang.org/x/tools/go/buildutil/util_test.go b/vendor/golang.org/x/tools/go/buildutil/util_test.go
index c72d59d..e676130 100644
--- a/vendor/golang.org/x/tools/go/buildutil/util_test.go
+++ b/vendor/golang.org/x/tools/go/buildutil/util_test.go
@@ -8,10 +8,13 @@ import (
"go/build"
"io/ioutil"
"os"
+ "path/filepath"
"runtime"
+ "strings"
"testing"
"golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/packages/packagestest"
)
func TestContainingPackage(t *testing.T) {
@@ -19,9 +22,22 @@ func TestContainingPackage(t *testing.T) {
t.Skip("gccgo has no GOROOT")
}
- // unvirtualized:
+ exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{
+ {Name: "golang.org/x/tools/go/buildutil", Files: packagestest.MustCopyFileTree(".")}})
+ defer exported.Cleanup()
+
goroot := runtime.GOROOT()
- gopath := gopathContainingTools(t)
+ var gopath string
+ for _, env := range exported.Config.Env {
+ if !strings.HasPrefix(env, "GOPATH=") {
+ continue
+ }
+ gopath = strings.TrimPrefix(env, "GOPATH=")
+ }
+ if gopath == "" {
+ t.Fatal("Failed to fish GOPATH out of env: ", exported.Config.Env)
+ }
+ buildutildir := filepath.Join(gopath, "golang.org", "x", "tools", "go", "buildutil")
type Test struct {
gopath, filename, wantPkg string
@@ -60,7 +76,7 @@ func TestContainingPackage(t *testing.T) {
var got string
var buildContext = build.Default
buildContext.GOPATH = test.gopath
- bp, err := buildutil.ContainingPackage(&buildContext, ".", test.filename)
+ bp, err := buildutil.ContainingPackage(&buildContext, buildutildir, test.filename)
if err != nil {
got = "(not found)"
} else {
@@ -71,15 +87,4 @@ func TestContainingPackage(t *testing.T) {
}
}
- // TODO(adonovan): test on virtualized GOPATH too.
-}
-
-// gopathContainingTools returns the path of the GOPATH workspace
-// with golang.org/x/tools, or fails the test if it can't locate it.
-func gopathContainingTools(t *testing.T) string {
- p, err := build.Import("golang.org/x/tools", "", build.FindOnly)
- if err != nil {
- t.Fatal(err)
- }
- return p.Root
}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/example_test.go b/vendor/golang.org/x/tools/go/gcexportdata/example_test.go
index b67d55f..a50bc40 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/example_test.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/example_test.go
@@ -51,7 +51,12 @@ func ExampleRead() {
}
// Print package information.
- fmt.Printf("Package members: %s...\n", pkg.Scope().Names()[:5])
+ members := pkg.Scope().Names()
+ if members[0] == ".inittask" {
+ // An improvement to init handling in 1.13 added ".inittask". Remove so go >= 1.13 and go < 1.13 both pass.
+ members = members[1:]
+ }
+ fmt.Printf("Package members: %s...\n", members[:5])
println := pkg.Scope().Lookup("Println")
posn := fset.Position(println.Pos())
posn.Line = 123 // make example deterministic
@@ -70,15 +75,15 @@ func ExampleRead() {
// ExampleNewImporter demonstrates usage of NewImporter to provide type
// information for dependencies when type-checking Go source code.
func ExampleNewImporter() {
- const src = `package myscanner
+ const src = `package myrpc
-// choosing a package that is unlikely to change across releases
-import "text/scanner"
+// choosing a package that doesn't change across releases
+import "net/rpc"
-const eof = scanner.EOF
+const serverError rpc.ServerError = ""
`
fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "myscanner.go", src, 0)
+ f, err := parser.ParseFile(fset, "myrpc.go", src, 0)
if err != nil {
log.Fatal(err)
}
@@ -86,23 +91,22 @@ const eof = scanner.EOF
packages := make(map[string]*types.Package)
imp := gcexportdata.NewImporter(fset, packages)
conf := types.Config{Importer: imp}
- pkg, err := conf.Check("myscanner", fset, []*ast.File{f}, nil)
+ pkg, err := conf.Check("myrpc", fset, []*ast.File{f}, nil)
if err != nil {
log.Fatal(err)
}
// object from imported package
- pi := packages["text/scanner"].Scope().Lookup("EOF")
- fmt.Printf("const %s.%s %s = %s // %s\n",
+ pi := packages["net/rpc"].Scope().Lookup("ServerError")
+ fmt.Printf("type %s.%s %s // %s\n",
pi.Pkg().Path(),
pi.Name(),
- pi.Type(),
- pi.(*types.Const).Val(),
+ pi.Type().Underlying(),
slashify(fset.Position(pi.Pos())),
)
// object in source package
- twopi := pkg.Scope().Lookup("eof")
+ twopi := pkg.Scope().Lookup("serverError")
fmt.Printf("const %s %s = %s // %s\n",
twopi.Name(),
twopi.Type(),
@@ -112,8 +116,8 @@ const eof = scanner.EOF
// Output:
//
- // const text/scanner.EOF untyped int = -1 // $GOROOT/src/text/scanner/scanner.go:75:1
- // const eof untyped int = -1 // myscanner.go:6:7
+ // type net/rpc.ServerError string // $GOROOT/src/net/rpc/client.go:20:1
+ // const serverError net/rpc.ServerError = "" // myrpc.go:6:7
}
func slashify(posn token.Position) token.Position {
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go
deleted file mode 100644
index 5c3cf2d..0000000
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport19_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.9
-
-package gcimporter_test
-
-import (
- "go/ast"
- "go/parser"
- "go/token"
- "go/types"
- "testing"
-
- "golang.org/x/tools/go/internal/gcimporter"
-)
-
-const src = `
-package p
-
-type (
- T0 = int32
- T1 = struct{}
- T2 = struct{ T1 }
- Invalid = foo // foo is undeclared
-)
-`
-
-func checkPkg(t *testing.T, pkg *types.Package, label string) {
- T1 := types.NewStruct(nil, nil)
- T2 := types.NewStruct([]*types.Var{types.NewField(0, pkg, "T1", T1, true)}, nil)
-
- for _, test := range []struct {
- name string
- typ types.Type
- }{
- {"T0", types.Typ[types.Int32]},
- {"T1", T1},
- {"T2", T2},
- {"Invalid", types.Typ[types.Invalid]},
- } {
- obj := pkg.Scope().Lookup(test.name)
- if obj == nil {
- t.Errorf("%s: %s not found", label, test.name)
- continue
- }
- tname, _ := obj.(*types.TypeName)
- if tname == nil {
- t.Errorf("%s: %v not a type name", label, obj)
- continue
- }
- if !tname.IsAlias() {
- t.Errorf("%s: %v: not marked as alias", label, tname)
- continue
- }
- if got := tname.Type(); !types.Identical(got, test.typ) {
- t.Errorf("%s: %v: got %v; want %v", label, tname, got, test.typ)
- }
- }
-}
-
-func TestTypeAliases(t *testing.T) {
- // parse and typecheck
- fset1 := token.NewFileSet()
- f, err := parser.ParseFile(fset1, "p.go", src, 0)
- if err != nil {
- t.Fatal(err)
- }
- var conf types.Config
- pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
- if err == nil {
- // foo in undeclared in src; we should see an error
- t.Fatal("invalid source type-checked without error")
- }
- if pkg1 == nil {
- // despite incorrect src we should see a (partially) type-checked package
- t.Fatal("nil package returned")
- }
- checkPkg(t, pkg1, "export")
-
- // export
- exportdata, err := gcimporter.BExportData(fset1, pkg1)
- if err != nil {
- t.Fatal(err)
- }
-
- // import
- imports := make(map[string]*types.Package)
- fset2 := token.NewFileSet()
- _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path())
- if err != nil {
- t.Fatalf("BImportData(%s): %v", pkg1.Path(), err)
- }
- checkPkg(t, pkg2, "import")
-}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go
index e78b78d..89870b1 100644
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport_test.go
@@ -22,6 +22,8 @@ import (
"golang.org/x/tools/go/loader"
)
+var isRace = false
+
func TestBExportData_stdlib(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("gccgo standard library is inaccessible")
@@ -29,6 +31,9 @@ func TestBExportData_stdlib(t *testing.T) {
if runtime.GOOS == "android" {
t.Skipf("incomplete std lib on %s", runtime.GOOS)
}
+ if isRace {
+ t.Skipf("stdlib tests take too long in race mode and flake on builders")
+ }
// Load, parse and type-check the program.
ctxt := build.Default // copy
@@ -333,3 +338,82 @@ func TestVeryLongFile(t *testing.T) {
posn2, want, posn1)
}
}
+
+const src = `
+package p
+
+type (
+ T0 = int32
+ T1 = struct{}
+ T2 = struct{ T1 }
+ Invalid = foo // foo is undeclared
+)
+`
+
+func checkPkg(t *testing.T, pkg *types.Package, label string) {
+ T1 := types.NewStruct(nil, nil)
+ T2 := types.NewStruct([]*types.Var{types.NewField(0, pkg, "T1", T1, true)}, nil)
+
+ for _, test := range []struct {
+ name string
+ typ types.Type
+ }{
+ {"T0", types.Typ[types.Int32]},
+ {"T1", T1},
+ {"T2", T2},
+ {"Invalid", types.Typ[types.Invalid]},
+ } {
+ obj := pkg.Scope().Lookup(test.name)
+ if obj == nil {
+ t.Errorf("%s: %s not found", label, test.name)
+ continue
+ }
+ tname, _ := obj.(*types.TypeName)
+ if tname == nil {
+ t.Errorf("%s: %v not a type name", label, obj)
+ continue
+ }
+ if !tname.IsAlias() {
+ t.Errorf("%s: %v: not marked as alias", label, tname)
+ continue
+ }
+ if got := tname.Type(); !types.Identical(got, test.typ) {
+ t.Errorf("%s: %v: got %v; want %v", label, tname, got, test.typ)
+ }
+ }
+}
+
+func TestTypeAliases(t *testing.T) {
+ // parse and typecheck
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "p.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
+ if err == nil {
+ // foo in undeclared in src; we should see an error
+ t.Fatal("invalid source type-checked without error")
+ }
+ if pkg1 == nil {
+ // despite incorrect src we should see a (partially) type-checked package
+ t.Fatal("nil package returned")
+ }
+ checkPkg(t, pkg1, "export")
+
+ // export
+ exportdata, err := gcimporter.BExportData(fset1, pkg1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path())
+ if err != nil {
+ t.Fatalf("BImportData(%s): %v", pkg1.Path(), err)
+ }
+ checkPkg(t, pkg2, "import")
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter11_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter11_test.go
new file mode 100644
index 0000000..1818681
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter11_test.go
@@ -0,0 +1,129 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package gcimporter
+
+import (
+ "go/types"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var importedObjectTests = []struct {
+ name string
+ want string
+}{
+ // non-interfaces
+ {"crypto.Hash", "type Hash uint"},
+ {"go/ast.ObjKind", "type ObjKind int"},
+ {"go/types.Qualifier", "type Qualifier func(*Package) string"},
+ {"go/types.Comparable", "func Comparable(T Type) bool"},
+ {"math.Pi", "const Pi untyped float"},
+ {"math.Sin", "func Sin(x float64) float64"},
+ {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
+ {"go/internal/gcimporter.BImportData", "func BImportData(fset *go/token.FileSet, imports map[string]*go/types.Package, data []byte, path string) (_ int, pkg *go/types.Package, err error)"},
+
+ // interfaces
+ {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}"},
+ {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
+ {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
+ {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
+ {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
+ {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+}
+
+func TestImportedTypes(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ for _, test := range importedObjectTests {
+ s := strings.Split(test.name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup(objName)
+ if obj == nil {
+ t.Errorf("%s: object not found", test.name)
+ continue
+ }
+
+ got := types.ObjectString(obj, types.RelativeTo(pkg))
+ if got != test.want {
+ t.Errorf("%s: got %q; want %q", test.name, got, test.want)
+ }
+
+ if named, _ := obj.Type().(*types.Named); named != nil {
+ verifyInterfaceMethodRecvs(t, named, 0)
+ }
+ }
+}
+
+// verifyInterfaceMethodRecvs verifies that method receiver types
+// are named if the methods belong to a named interface type.
+func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) {
+ // avoid endless recursion in case of an embedding bug that lead to a cycle
+ if level > 10 {
+ t.Errorf("%s: embeds itself", named)
+ return
+ }
+
+ iface, _ := named.Underlying().(*types.Interface)
+ if iface == nil {
+ return // not an interface
+ }
+
+ // check explicitly declared methods
+ for i := 0; i < iface.NumExplicitMethods(); i++ {
+ m := iface.ExplicitMethod(i)
+ recv := m.Type().(*types.Signature).Recv()
+ if recv == nil {
+ t.Errorf("%s: missing receiver type", m)
+ continue
+ }
+ if recv.Type() != named {
+ t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named)
+ }
+ }
+
+ // check embedded interfaces (if they are named, too)
+ for i := 0; i < iface.NumEmbeddeds(); i++ {
+ // embedding of interfaces cannot have cycles; recursion will terminate
+ if etype, _ := iface.EmbeddedType(i).(*types.Named); etype != nil {
+ verifyInterfaceMethodRecvs(t, etype, level+1)
+ }
+ }
+}
+func TestIssue25301(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue25301")
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go
index 56cdfc0..14622d3 100644
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter_test.go
@@ -73,22 +73,29 @@ func skipSpecialPlatforms(t *testing.T) {
}
}
-func compile(t *testing.T, dirname, filename string) string {
+// compile runs the compiler on filename, with dirname as the working directory,
+// and writes the output file to outdirname.
+func compile(t *testing.T, dirname, filename, outdirname string) string {
/* testenv. */ MustHaveGoBuild(t)
- cmd := exec.Command("go", "tool", "compile", filename)
+ // filename must end with ".go"
+ if !strings.HasSuffix(filename, ".go") {
+ t.Fatalf("filename doesn't end in .go: %s", filename)
+ }
+ basename := filepath.Base(filename)
+ outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o")
+ cmd := exec.Command("go", "tool", "compile", "-o", outname, filename)
cmd.Dir = dirname
out, err := cmd.CombinedOutput()
if err != nil {
t.Logf("%s", out)
t.Fatalf("go tool compile %s failed: %s", filename, err)
}
- // filename should end with ".go"
- return filepath.Join(dirname, filename[:len(filename)-2]+"o")
+ return outname
}
func testPath(t *testing.T, path, srcDir string) *types.Package {
t0 := time.Now()
- pkg, err := Import(make(map[string]*types.Package), path, srcDir)
+ pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
if err != nil {
t.Errorf("testPath(%s): %s", path, err)
return nil
@@ -128,6 +135,18 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
return
}
+func mktmpdir(t *testing.T) string {
+ tmpdir, err := ioutil.TempDir("", "gcimporter_test")
+ if err != nil {
+ t.Fatal("mktmpdir:", err)
+ }
+ if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil {
+ os.RemoveAll(tmpdir)
+ t.Fatal("mktmpdir:", err)
+ }
+ return tmpdir
+}
+
const testfile = "exports.go"
func TestImportTestdata(t *testing.T) {
@@ -136,13 +155,14 @@ func TestImportTestdata(t *testing.T) {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
- if outFn := compile(t, "testdata", testfile); outFn != "" {
- defer os.Remove(outFn)
- }
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"))
// filename should end with ".go"
filename := testfile[:len(testfile)-3]
- if pkg := testPath(t, "./testdata/"+filename, "."); pkg != nil {
+ if pkg := testPath(t, "./testdata/"+filename, tmpdir); pkg != nil {
// The package's Imports list must include all packages
// explicitly imported by testfile, plus all packages
// referenced indirectly via exported objects in testfile.
@@ -175,6 +195,13 @@ func TestVersionHandling(t *testing.T) {
t.Fatal(err)
}
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ corruptdir := filepath.Join(tmpdir, "testdata", "versions")
+ if err := os.Mkdir(corruptdir, 0700); err != nil {
+ t.Fatal(err)
+ }
+
for _, f := range list {
name := f.Name()
if !strings.HasSuffix(name, ".a") {
@@ -185,9 +212,21 @@ func TestVersionHandling(t *testing.T) {
}
pkgpath := "./" + name[:len(name)-2]
+ if testing.Verbose() {
+ t.Logf("importing %s", name)
+ }
+
// test that export data can be imported
- _, err := Import(make(map[string]*types.Package), pkgpath, dir)
+ _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil)
if err != nil {
+ // ok to fail if it fails with a newer version error for select files
+ if strings.Contains(err.Error(), "newer version") {
+ switch name {
+ case "test_go1.11_999b.a", "test_go1.11_999i.a":
+ continue
+ }
+ // fall through
+ }
t.Errorf("import %q failed: %v", pkgpath, err)
continue
}
@@ -210,12 +249,11 @@ func TestVersionHandling(t *testing.T) {
}
// 4) write the file
pkgpath += "_corrupted"
- filename := filepath.Join(dir, pkgpath) + ".a"
+ filename := filepath.Join(corruptdir, pkgpath) + ".a"
ioutil.WriteFile(filename, data, 0666)
- defer os.Remove(filename)
// test that importing the corrupted file results in an error
- _, err = Import(make(map[string]*types.Package), pkgpath, dir)
+ _, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil)
if err == nil {
t.Errorf("import corrupted %q succeeded", pkgpath)
} else if msg := err.Error(); !strings.Contains(msg, "version skew") {
@@ -240,55 +278,6 @@ func TestImportStdLib(t *testing.T) {
t.Logf("tested %d imports", nimports)
}
-var importedObjectTests = []struct {
- name string
- want string
-}{
- {"math.Pi", "const Pi untyped float"},
- {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
- // Go 1.7 and 1.8 don't know about embedded interfaces. Leave this
- // test out for now - the code is tested in the std library anyway.
- // TODO(gri) enable again once we're off 1.7 and 1.8.
- // {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
- {"math.Sin", "func Sin(x float64) float64"},
- // TODO(gri) add more tests
-}
-
-func TestImportedTypes(t *testing.T) {
- skipSpecialPlatforms(t)
-
- // This package only handles gc export data.
- if runtime.Compiler != "gc" {
- t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
- }
-
- for _, test := range importedObjectTests {
- s := strings.Split(test.name, ".")
- if len(s) != 2 {
- t.Fatal("inconsistent test data")
- }
- importPath := s[0]
- objName := s[1]
-
- pkg, err := Import(make(map[string]*types.Package), importPath, ".")
- if err != nil {
- t.Error(err)
- continue
- }
-
- obj := pkg.Scope().Lookup(objName)
- if obj == nil {
- t.Errorf("%s: object not found", test.name)
- continue
- }
-
- got := types.ObjectString(obj, types.RelativeTo(pkg))
- if got != test.want {
- t.Errorf("%s: got %q; want %q", test.name, got, test.want)
- }
- }
-}
-
func TestIssue5815(t *testing.T) {
skipSpecialPlatforms(t)
@@ -297,7 +286,7 @@ func TestIssue5815(t *testing.T) {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
- pkg := importPkg(t, "strings")
+ pkg := importPkg(t, "strings", ".")
scope := pkg.Scope()
for _, name := range scope.Names() {
@@ -327,7 +316,7 @@ func TestCorrectMethodPackage(t *testing.T) {
}
imports := make(map[string]*types.Package)
- _, err := Import(imports, "net/http", ".")
+ _, err := Import(imports, "net/http", ".", nil)
if err != nil {
t.Fatal(err)
}
@@ -355,15 +344,22 @@ func TestIssue13566(t *testing.T) {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
- if f := compile(t, "testdata", "a.go"); f != "" {
- defer os.Remove(f)
- }
- if f := compile(t, "testdata", "b.go"); f != "" {
- defer os.Remove(f)
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ testoutdir := filepath.Join(tmpdir, "testdata")
+
+ // b.go needs to be compiled from the output directory so that the compiler can
+ // find the compiled package a. We pass the full path to compile() so that we
+ // don't have to copy the file to that directory.
+ bpath, err := filepath.Abs(filepath.Join("testdata", "b.go"))
+ if err != nil {
+ t.Fatal(err)
}
+ compile(t, "testdata", "a.go", testoutdir)
+ compile(t, testoutdir, bpath, testoutdir)
// import must succeed (test for issue at hand)
- pkg := importPkg(t, "./testdata/b")
+ pkg := importPkg(t, "./testdata/b", tmpdir)
// make sure all indirectly imported packages have names
for _, imp := range pkg.Imports() {
@@ -383,7 +379,7 @@ func TestIssue13898(t *testing.T) {
// import go/internal/gcimporter which imports go/types partially
imports := make(map[string]*types.Package)
- _, err := Import(imports, "go/internal/gcimporter", ".")
+ _, err := Import(imports, "go/internal/gcimporter", ".", nil)
if err != nil {
t.Fatal(err)
}
@@ -433,9 +429,10 @@ func TestIssue15517(t *testing.T) {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
- if f := compile(t, "testdata", "p.go"); f != "" {
- defer os.Remove(f)
- }
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"))
// Multiple imports of p must succeed without redeclaration errors.
// We use an import path that's not cleaned up so that the eventual
@@ -451,7 +448,7 @@ func TestIssue15517(t *testing.T) {
// The same issue occurs with vendoring.)
imports := make(map[string]*types.Package)
for i := 0; i < 3; i++ {
- if _, err := Import(imports, "./././testdata/p", "."); err != nil {
+ if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil {
t.Fatal(err)
}
}
@@ -471,11 +468,7 @@ func TestIssue15920(t *testing.T) {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
- if f := compile(t, "testdata", "issue15920.go"); f != "" {
- defer os.Remove(f)
- }
-
- importPkg(t, "./testdata/issue15920")
+ compileAndImportPkg(t, "issue15920")
}
func TestIssue20046(t *testing.T) {
@@ -492,26 +485,29 @@ func TestIssue20046(t *testing.T) {
t.Skip("avoid dealing with relative paths/drive letters on windows")
}
- if f := compile(t, "testdata", "issue20046.go"); f != "" {
- defer os.Remove(f)
- }
-
// "./issue20046".V.M must exist
- pkg := importPkg(t, "./testdata/issue20046")
+ pkg := compileAndImportPkg(t, "issue20046")
obj := lookupObj(t, pkg.Scope(), "V")
if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
}
}
-func importPkg(t *testing.T, path string) *types.Package {
- pkg, err := Import(make(map[string]*types.Package), path, ".")
+func importPkg(t *testing.T, path, srcDir string) *types.Package {
+ pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
if err != nil {
t.Fatal(err)
}
return pkg
}
+func compileAndImportPkg(t *testing.T, name string) *types.Package {
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"))
+ return importPkg(t, "./testdata/"+name, tmpdir)
+}
+
func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object {
if obj := scope.Lookup(name); obj != nil {
return obj
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport_test.go
new file mode 100644
index 0000000..3c91810
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport_test.go
@@ -0,0 +1,308 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a copy of bexport_test.go for iexport.go.
+
+// +build go1.11
+
+package gcimporter_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/constant"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "math/big"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/buildutil"
+ "golang.org/x/tools/go/internal/gcimporter"
+ "golang.org/x/tools/go/loader"
+)
+
+func TestIExportData_stdlib(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("gccgo standard library is inaccessible")
+ }
+ if runtime.GOOS == "android" {
+ t.Skipf("incomplete std lib on %s", runtime.GOOS)
+ }
+ if isRace {
+ t.Skipf("stdlib tests take too long in race mode and flake on builders")
+ }
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{
+ Build: &ctxt,
+ AllowErrors: true,
+ }
+ for _, path := range buildutil.AllPackages(conf.Build) {
+ conf.Import(path)
+ }
+
+ // Create a package containing type and value errors to ensure
+ // they are properly encoded/decoded.
+ f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors
+const UnknownValue = "" + 0
+type UnknownType undefined
+`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf.CreateFromFiles("haserrors", f)
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ numPkgs := len(prog.AllPackages)
+ if want := 248; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ var sorted []*types.Package
+ for pkg := range prog.AllPackages {
+ sorted = append(sorted, pkg)
+ }
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Path() < sorted[j].Path()
+ })
+
+ for _, pkg := range sorted {
+ info := prog.AllPackages[pkg]
+ if info.Files == nil {
+ continue // empty directory
+ }
+ exportdata, err := gcimporter.IExportData(conf.Fset, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exportdata[0] == 'i' {
+ exportdata = exportdata[1:] // trim the 'i' in the header
+ } else {
+ t.Fatalf("unexpected first character of export data: %v", exportdata[0])
+ }
+
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ n, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Errorf("IImportData(%s): %v", pkg.Path(), err)
+ continue
+ }
+ if n != len(exportdata) {
+ t.Errorf("IImportData(%s) decoded %d bytes, want %d",
+ pkg.Path(), n, len(exportdata))
+ }
+
+ // Compare the packages' corresponding members.
+ for _, name := range pkg.Scope().Names() {
+ if !ast.IsExported(name) {
+ continue
+ }
+ obj1 := pkg.Scope().Lookup(name)
+ obj2 := pkg2.Scope().Lookup(name)
+ if obj2 == nil {
+ t.Fatalf("%s.%s not found, want %s", pkg.Path(), name, obj1)
+ continue
+ }
+
+ fl1 := fileLine(conf.Fset, obj1)
+ fl2 := fileLine(fset2, obj2)
+ if fl1 != fl2 {
+ t.Errorf("%s.%s: got posn %s, want %s",
+ pkg.Path(), name, fl2, fl1)
+ }
+
+ if err := cmpObj(obj1, obj2); err != nil {
+ t.Errorf("%s.%s: %s\ngot: %s\nwant: %s",
+ pkg.Path(), name, err, obj2, obj1)
+ }
+ }
+ }
+}
+
+// TestVeryLongFile tests the position of an import object declared in
+// a very long input file. Line numbers greater than maxlines are
+// reported as line 1, not garbage or token.NoPos.
+func TestIExportData_long(t *testing.T) {
+ // parse and typecheck
+ longFile := "package foo" + strings.Repeat("\n", 123456) + "var X int"
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "foo.go", longFile, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg, err := conf.Check("foo", fset1, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // export
+ exportdata, err := gcimporter.IExportData(fset1, pkg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exportdata[0] == 'i' {
+ exportdata = exportdata[1:] // trim the 'i' in the header
+ } else {
+ t.Fatalf("unexpected first character of export data: %v", exportdata[0])
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path())
+ if err != nil {
+ t.Fatalf("IImportData(%s): %v", pkg.Path(), err)
+ }
+
+ // compare
+ posn1 := fset1.Position(pkg.Scope().Lookup("X").Pos())
+ posn2 := fset2.Position(pkg2.Scope().Lookup("X").Pos())
+ if want := "foo.go:1:1"; posn2.String() != want {
+ t.Errorf("X position = %s, want %s (orig was %s)",
+ posn2, want, posn1)
+ }
+}
+
+func TestIExportData_typealiases(t *testing.T) {
+ // parse and typecheck
+ fset1 := token.NewFileSet()
+ f, err := parser.ParseFile(fset1, "p.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var conf types.Config
+ pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil)
+ if err == nil {
+ // foo in undeclared in src; we should see an error
+ t.Fatal("invalid source type-checked without error")
+ }
+ if pkg1 == nil {
+ // despite incorrect src we should see a (partially) type-checked package
+ t.Fatal("nil package returned")
+ }
+ checkPkg(t, pkg1, "export")
+
+ // export
+ exportdata, err := gcimporter.IExportData(fset1, pkg1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exportdata[0] == 'i' {
+ exportdata = exportdata[1:] // trim the 'i' in the header
+ } else {
+ t.Fatalf("unexpected first character of export data: %v", exportdata[0])
+ }
+
+ // import
+ imports := make(map[string]*types.Package)
+ fset2 := token.NewFileSet()
+ _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg1.Path())
+ if err != nil {
+ t.Fatalf("IImportData(%s): %v", pkg1.Path(), err)
+ }
+ checkPkg(t, pkg2, "import")
+}
+
+// cmpObj reports how x and y differ. They are assumed to belong to different
+// universes so cannot be compared directly. It is an adapted version of
+// equalObj in bexport_test.go.
+func cmpObj(x, y types.Object) error {
+ if reflect.TypeOf(x) != reflect.TypeOf(y) {
+ return fmt.Errorf("%T vs %T", x, y)
+ }
+ xt := x.Type()
+ yt := y.Type()
+ switch x.(type) {
+ case *types.Var, *types.Func:
+ // ok
+ case *types.Const:
+ xval := x.(*types.Const).Val()
+ yval := y.(*types.Const).Val()
+ equal := constant.Compare(xval, token.EQL, yval)
+ if !equal {
+ // try approx. comparison
+ xkind := xval.Kind()
+ ykind := yval.Kind()
+ if xkind == constant.Complex || ykind == constant.Complex {
+ equal = same(constant.Real(xval), constant.Real(yval)) &&
+ same(constant.Imag(xval), constant.Imag(yval))
+ } else if xkind == constant.Float || ykind == constant.Float {
+ equal = same(xval, yval)
+ } else if xkind == constant.Unknown && ykind == constant.Unknown {
+ equal = true
+ }
+ }
+ if !equal {
+ return fmt.Errorf("unequal constants %s vs %s", xval, yval)
+ }
+ case *types.TypeName:
+ xt = xt.Underlying()
+ yt = yt.Underlying()
+ default:
+ return fmt.Errorf("unexpected %T", x)
+ }
+ return equalType(xt, yt)
+}
+
+// Use the same floating-point precision (512) as cmd/compile
+// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+const mpprec = 512
+
+// same compares non-complex numeric values and reports if they are approximately equal.
+func same(x, y constant.Value) bool {
+ xf := constantToFloat(x)
+ yf := constantToFloat(y)
+ d := new(big.Float).Sub(xf, yf)
+ d.Abs(d)
+ eps := big.NewFloat(1.0 / (1 << (mpprec - 1))) // allow for 1 bit of error
+ return d.Cmp(eps) < 0
+}
+
+// copy of the function with the same name in iexport.go.
+func constantToFloat(x constant.Value) *big.Float {
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ if !ok {
+ panic("should not reach here")
+ }
+ }
+ return &f
+}
+
+// copy of the function with the same name in iexport.go.
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/israce_test.go b/vendor/golang.org/x/tools/go/internal/gcimporter/israce_test.go
new file mode 100644
index 0000000..af8e52b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/israce_test.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package gcimporter_test
+
+func init() {
+ isRace = true
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee_test.go b/vendor/golang.org/x/tools/go/types/typeutil/callee_test.go
new file mode 100644
index 0000000..a0d107d
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee_test.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil_test
+
+import (
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "strings"
+ "testing"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+func TestStaticCallee(t *testing.T) {
+ const src = `package p
+
+import "fmt"
+
+type T int
+
+func g(int)
+
+var f = g
+
+var x int
+
+type s struct{ f func(int) }
+func (s) g(int)
+
+type I interface{ f(int) }
+
+var a struct{b struct{c s}}
+
+func calls() {
+ g(x) // a declared func
+ s{}.g(x) // a concrete method
+ a.b.c.g(x) // same
+ fmt.Println(x) // declared func, qualified identifier
+}
+
+func noncalls() {
+ _ = T(x) // a type
+ f(x) // a var
+ panic(x) // a built-in
+ s{}.f(x) // a field
+ I(nil).f(x) // interface method
+}
+`
+ // parse
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "p.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // type-check
+ info := &types.Info{
+ Uses: make(map[*ast.Ident]types.Object),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ cfg := &types.Config{Importer: importer.For("source", nil)}
+ if _, err := cfg.Check("p", fset, []*ast.File{f}, info); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, decl := range f.Decls {
+ if decl, ok := decl.(*ast.FuncDecl); ok && strings.HasSuffix(decl.Name.Name, "calls") {
+ wantCallee := decl.Name.Name == "calls" // false within func noncalls()
+ ast.Inspect(decl.Body, func(n ast.Node) bool {
+ if call, ok := n.(*ast.CallExpr); ok {
+ fn := typeutil.StaticCallee(info, call)
+ if fn == nil && wantCallee {
+ t.Errorf("%s: StaticCallee returned nil",
+ fset.Position(call.Lparen))
+ } else if fn != nil && !wantCallee {
+ t.Errorf("%s: StaticCallee returned %s, want nil",
+ fset.Position(call.Lparen), fn)
+ }
+ }
+ return true
+ })
+ }
+ }
+}
diff --git a/wasm/skycoin.go b/wasm/skycoin.go
index 7599694..615d9e4 100644
--- a/wasm/skycoin.go
+++ b/wasm/skycoin.go
@@ -77,29 +77,29 @@ func prepareTransactionWithSignatures(this js.Value, inputs []js.Value) (respons
// The following functions are simply wrappers to call the functions in
// liteclient/extras.go.
-func verifySignature(this js.Value, inputs []js.Value) (response interface{}) {
+func verifyPubKeySignedHash(this js.Value, inputs []js.Value) (response interface{}) {
defer recoverFromPanic(&response)
checkParams(&inputs)
- liteclient.VerifySignature(inputs[0].String(), inputs[1].String(), inputs[2].String())
+ liteclient.VerifyPubKeySignedHash(inputs[0].String(), inputs[1].String(), inputs[2].String())
return
}
-func chkSig(this js.Value, inputs []js.Value) (response interface{}) {
+func verifyAddressSignedHash(this js.Value, inputs []js.Value) (response interface{}) {
defer recoverFromPanic(&response)
checkParams(&inputs)
- liteclient.ChkSig(inputs[0].String(), inputs[1].String(), inputs[2].String())
+ liteclient.VerifyAddressSignedHash(inputs[0].String(), inputs[1].String(), inputs[2].String())
return
}
-func verifySignedHash(this js.Value, inputs []js.Value) (response interface{}) {
+func verifySignatureRecoverPubKey(this js.Value, inputs []js.Value) (response interface{}) {
defer recoverFromPanic(&response)
checkParams(&inputs)
- liteclient.VerifySignedHash(inputs[0].String(), inputs[1].String())
+ liteclient.VerifySignatureRecoverPubKey(inputs[0].String(), inputs[1].String())
return
}
@@ -172,9 +172,9 @@ func main() {
// Add the extra functions to the the "window.SkycoinCipherExtras" object.
cipherExtrasNamespace := "SkycoinCipherExtras"
js.Global().Set(cipherExtrasNamespace, js.FuncOf(nil))
- js.Global().Get(cipherExtrasNamespace).Set("verifySignature", js.FuncOf(verifySignature))
- js.Global().Get(cipherExtrasNamespace).Set("chkSig", js.FuncOf(chkSig))
- js.Global().Get(cipherExtrasNamespace).Set("verifySignedHash", js.FuncOf(verifySignedHash))
+ js.Global().Get(cipherExtrasNamespace).Set("verifyPubKeySignedHash", js.FuncOf(verifyPubKeySignedHash))
+ js.Global().Get(cipherExtrasNamespace).Set("verifyAddressSignedHash", js.FuncOf(verifyAddressSignedHash))
+ js.Global().Get(cipherExtrasNamespace).Set("verifySignatureRecoverPubKey", js.FuncOf(verifySignatureRecoverPubKey))
js.Global().Get(cipherExtrasNamespace).Set("verifySeckey", js.FuncOf(verifySeckey))
js.Global().Get(cipherExtrasNamespace).Set("verifyPubkey", js.FuncOf(verifyPubkey))
js.Global().Get(cipherExtrasNamespace).Set("addressFromPubKey", js.FuncOf(addressFromPubKey))
From 80a7be764498c2f7c47c5e4675e7c81d1311ae14 Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Sat, 13 Jul 2019 14:16:14 -0400
Subject: [PATCH 6/8] Fix the lint procedure
---
Makefile | 4 ++--
js/karma-wasm.conf.js | 2 --
wasm/skycoin.go | 6 ++++--
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/Makefile b/Makefile
index 0052176..46eb218 100644
--- a/Makefile
+++ b/Makefile
@@ -40,9 +40,9 @@ test:
lint: ## Run linters. Use make install-linters first.
vendorcheck ./...
- golangci-lint run -c ./.golangci.yml ./...
+ GOOS=js GOARCH=wasm golangci-lint run -c ./.golangci.yml ./...
@# The govet version in golangci-lint is out of date and has spurious warnings, run it separately
- go vet -all ./...
+ GOOS=js GOARCH=wasm go vet -all ./...
check: lint test ## Run tests and linters
diff --git a/js/karma-wasm.conf.js b/js/karma-wasm.conf.js
index 4410527..3e8b3bc 100644
--- a/js/karma-wasm.conf.js
+++ b/js/karma-wasm.conf.js
@@ -17,8 +17,6 @@ module.exports = function (config) {
'tests/cipher-wasm.spec.ts',
{ pattern: 'tests/test-fixtures/*.golden', included: false },
{ pattern: 'skycoin-lite.wasm', included: false },
- { pattern: 'test1.wasm', included: false },
- { pattern: 'test2.wasm', included: false },
{ pattern: 'tests/utils.ts', included: true },
{ pattern: 'tests/wasm_exec.js', included: true },
],
diff --git a/wasm/skycoin.go b/wasm/skycoin.go
index 615d9e4..b7de5e8 100644
--- a/wasm/skycoin.go
+++ b/wasm/skycoin.go
@@ -1,3 +1,5 @@
+// +build js,wasm
+
package main
import (
@@ -9,8 +11,8 @@ import (
// recoverFromPanic captures the panics and returns an object with the error message.
// It must be used in all the functions that can be called using the compiled wasm
-// file, as the Go code contains multiple panics that would completelly stop the
-// excecution of the wasm application without returning adequate errors to the JS code.
+// file, as the Go code contains multiple panics that would completely stop the
+// execution of the wasm application without returning adequate errors to the JS code.
func recoverFromPanic(response *interface{}) {
if err := recover(); err != nil {
finalResponse := make(map[string]interface{})
From 99cfc29d3c51d6e50973db07da4534e68b7129be Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Sat, 13 Jul 2019 14:40:19 -0400
Subject: [PATCH 7/8] Fix the wasm tests
---
js/tests/cipher-wasm-internal.js | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/js/tests/cipher-wasm-internal.js b/js/tests/cipher-wasm-internal.js
index dd25954..ef80721 100644
--- a/js/tests/cipher-wasm-internal.js
+++ b/js/tests/cipher-wasm-internal.js
@@ -1,6 +1,10 @@
// Runs the tests from src/cipher/sec256k1-go/ and src/cipher/sec256k1-go/secp256k1-go2/.
// It needs the tests to be compiled, so it was created for being called by "make test-suite-ts-wasm"
+// This is done to make wasm_exec think that it is not running inside Node.js, so that it does not ask
+// for special parameters and works in a similar way as how it would do in a browser.
+global.process.title = '';
+
// Required for wasm_exec to work correctly in Node.js.
const util = require('util');
TextEncoder = util.TextEncoder;
From d087929123654cfb28f3fde5e1b8b3fe9ef85841 Mon Sep 17 00:00:00 2001
From: Senyoret1 <34079003+Senyoret1@users.noreply.github.com>
Date: Mon, 15 Jul 2019 17:32:48 -0400
Subject: [PATCH 8/8] Run a wasm version of the /cipher tests
---
Gopkg.lock | 22 +-
Gopkg.toml | 6 +-
Makefile | 2 +
js/tests/cipher-wasm-internal.js | 31 +-
vendor/github.com/davecgh/go-spew/LICENSE | 15 +
.../github.com/davecgh/go-spew/spew/bypass.go | 145 ++
.../davecgh/go-spew/spew/bypasssafe.go | 38 +
.../github.com/davecgh/go-spew/spew/common.go | 341 ++++
.../davecgh/go-spew/spew/common_test.go | 298 ++++
.../github.com/davecgh/go-spew/spew/config.go | 306 ++++
vendor/github.com/davecgh/go-spew/spew/doc.go | 211 +++
.../github.com/davecgh/go-spew/spew/dump.go | 509 ++++++
.../davecgh/go-spew/spew/dump_test.go | 1042 +++++++++++
.../davecgh/go-spew/spew/dumpcgo_test.go | 101 ++
.../davecgh/go-spew/spew/dumpnocgo_test.go | 26 +
.../davecgh/go-spew/spew/example_test.go | 226 +++
.../github.com/davecgh/go-spew/spew/format.go | 419 +++++
.../davecgh/go-spew/spew/format_test.go | 1558 +++++++++++++++++
.../davecgh/go-spew/spew/internal_test.go | 84 +
.../go-spew/spew/internalunsafe_test.go | 101 ++
.../github.com/davecgh/go-spew/spew/spew.go | 148 ++
.../davecgh/go-spew/spew/spew_test.go | 320 ++++
vendor/github.com/pmezard/go-difflib/LICENSE | 27 +
.../pmezard/go-difflib/difflib/difflib.go | 772 ++++++++
.../go-difflib/difflib/difflib_test.go | 426 +++++
.../github.com/stretchr/testify/LICENCE.txt | 22 +
vendor/github.com/stretchr/testify/LICENSE | 22 +
.../testify/assert/assertion_format.go | 379 ++++
.../testify/assert/assertion_format.go.tmpl | 4 +
.../testify/assert/assertion_forward.go | 746 ++++++++
.../testify/assert/assertion_forward.go.tmpl | 4 +
.../stretchr/testify/assert/assertions.go | 1208 +++++++++++++
.../testify/assert/assertions_test.go | 1406 +++++++++++++++
.../github.com/stretchr/testify/assert/doc.go | 45 +
.../stretchr/testify/assert/errors.go | 10 +
.../testify/assert/forward_assertions.go | 16 +
.../testify/assert/forward_assertions_test.go | 611 +++++++
.../testify/assert/http_assertions.go | 127 ++
.../testify/assert/http_assertions_test.go | 117 ++
.../stretchr/testify/require/doc.go | 28 +
.../testify/require/forward_requirements.go | 16 +
.../require/forward_requirements_test.go | 385 ++++
.../stretchr/testify/require/require.go | 911 ++++++++++
.../stretchr/testify/require/require.go.tmpl | 6 +
.../testify/require/require_forward.go | 747 ++++++++
.../testify/require/require_forward.go.tmpl | 4 +
.../stretchr/testify/require/requirements.go | 9 +
.../testify/require/requirements_test.go | 369 ++++
48 files changed, 14345 insertions(+), 21 deletions(-)
create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go
create mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE
create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go
create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go
create mode 100644 vendor/github.com/stretchr/testify/LICENCE.txt
create mode 100644 vendor/github.com/stretchr/testify/LICENSE
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertions_test.go
create mode 100644 vendor/github.com/stretchr/testify/assert/doc.go
create mode 100644 vendor/github.com/stretchr/testify/assert/errors.go
create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions_test.go
create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions_test.go
create mode 100644 vendor/github.com/stretchr/testify/require/doc.go
create mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements.go
create mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements_test.go
create mode 100644 vendor/github.com/stretchr/testify/require/require.go
create mode 100644 vendor/github.com/stretchr/testify/require/require.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go
create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/require/requirements.go
create mode 100644 vendor/github.com/stretchr/testify/require/requirements_test.go
diff --git a/Gopkg.lock b/Gopkg.lock
index fb20ff6..aba41a6 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1,6 +1,12 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+[[projects]]
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+ version = "v1.1.1"
+
[[projects]]
name = "github.com/fsnotify/fsnotify"
packages = ["."]
@@ -53,6 +59,12 @@
packages = ["."]
revision = "8c68805598ab8d5637b1a72b5f7d381ea0f39c31"
+[[projects]]
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
[[projects]]
branch = "master"
name = "github.com/shurcooL/httpfs"
@@ -92,6 +104,14 @@
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
+[[projects]]
+ name = "github.com/stretchr/testify"
+ packages = [
+ "assert",
+ "require"
+ ]
+ revision = "2aa2c176b9dab406a6970f6a55f513e8a8c8b18f"
+
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
@@ -122,6 +142,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "5c40980efb4f3f523fd74f51d7e55c9b72280a110c63004b3f128618740eea84"
+ inputs-digest = "adab7322fb3b2020308217373e3e6d8ea5e385227912111af0940416f6d99d67"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index 8454e7c..ad7807f 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -24,11 +24,15 @@
# go-tests = true
# unused-packages = true
-required = ["github.com/gopherjs/gopherjs"]
+required = ["github.com/gopherjs/gopherjs", "github.com/stretchr/testify/require"]
[[constraint]]
branch = "develop"
name = "github.com/skycoin/skycoin"
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ revision = "2aa2c176b9dab406a6970f6a55f513e8a8c8b18f"
+
[prune]
unused-packages = true
diff --git a/Makefile b/Makefile
index 46eb218..ad61cae 100644
--- a/Makefile
+++ b/Makefile
@@ -28,10 +28,12 @@ test-suite-ts-extensive: ## Run the ts version of the cipher test suite for Goph
cd js && npm run test-extensive
test-suite-ts-wasm: ## Run the ts version of the cipher test suite for wasm and additional tests
+ cd vendor/github.com/skycoin/skycoin/src/cipher && GOOS=js GOARCH=wasm go test -c -o test.wasm
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go && GOOS=js GOARCH=wasm go test -c -o test.wasm
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 && GOOS=js GOARCH=wasm go test -c -o test.wasm
cd js && npm run test-wasm
cd js/tests && node cipher-wasm-internal.js
+ cd vendor/github.com/skycoin/skycoin/src/cipher && rm test.wasm
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go && rm test.wasm
cd vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2 && rm test.wasm
diff --git a/js/tests/cipher-wasm-internal.js b/js/tests/cipher-wasm-internal.js
index ef80721..d769401 100644
--- a/js/tests/cipher-wasm-internal.js
+++ b/js/tests/cipher-wasm-internal.js
@@ -1,4 +1,4 @@
-// Runs the tests from src/cipher/sec256k1-go/ and src/cipher/sec256k1-go/secp256k1-go2/.
+// Runs the tests from src/cipher/, src/cipher/sec256k1-go/ and src/cipher/sec256k1-go/secp256k1-go2/.
// It needs the tests to be compiled, so it was created for being called by "make test-suite-ts-wasm"
// This is done to make wasm_exec think that it is not running inside Node.js, so that it does not ask
@@ -37,30 +37,23 @@ console.warn = (message, ...optionalParams) => {
process.exit(1);
};
-runTest1 = function() {
- const testFile = fs.readFileSync('../../vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm', null).buffer;
- const go = new global.Go();
- WebAssembly.instantiate(testFile, go.importObject).then(result => {
- go.run(result.instance).then(() => {
- runTest2();
- }, err => {
- console.log(err);
- process.exit(1);
- });
- });
-}
+let testFiles = [
+ '../../vendor/github.com/skycoin/skycoin/src/cipher/test.wasm',
+ '../../vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/test.wasm',
+ '../../vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm',
+];
-runTest2 = function() {
- const testFile = fs.readFileSync('../../vendor/github.com/skycoin/skycoin/src/cipher/secp256k1-go/secp256k1-go2/test.wasm', null).buffer;
+runTest = function(testFilePath) {
+ const testFile = fs.readFileSync(testFilePath, null).buffer;
const go = new global.Go();
WebAssembly.instantiate(testFile, go.importObject).then(result => {
- go.run(result.instance).then(() => {
-
- }, err => {
+ go.run(result.instance).then(null, err => {
console.log(err);
process.exit(1);
});
});
}
-runTest1();
+testFiles.forEach(file => {
+ runTest(file);
+});
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into a one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go
new file mode 100644
index 0000000..0f5ce47
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// custom type to test Stringer interface on non-pointer receiver.
+type stringer string
+
+// String implements the Stringer interface for testing invocation of custom
+// stringers on types with non-pointer receivers.
+func (s stringer) String() string {
+	return "stringer " + string(s)
+}
+
+// custom type to test Stringer interface on pointer receiver.
+type pstringer string
+
+// String implements the Stringer interface for testing invocation of custom
+// stringers on types with only pointer receivers.
+func (s *pstringer) String() string {
+	return "stringer " + string(*s)
+}
+
+// xref1 and xref2 are cross referencing structs for testing circular reference
+// detection.
+type xref1 struct {
+ ps2 *xref2
+}
+type xref2 struct {
+ ps1 *xref1
+}
+
+// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
+// reference for testing detection.
+type indirCir1 struct {
+ ps2 *indirCir2
+}
+type indirCir2 struct {
+ ps3 *indirCir3
+}
+type indirCir3 struct {
+ ps1 *indirCir1
+}
+
+// embed is used to test embedded structures.
+type embed struct {
+ a string
+}
+
+// embedwrap is used to test embedded structures.
+type embedwrap struct {
+ *embed
+ e *embed
+}
+
+// panicer is used to intentionally cause a panic for testing spew properly
+// handles them
+type panicer int
+
+func (p panicer) String() string {
+ panic("test panic")
+}
+
+// customError is used to test custom error interface invocation.
+type customError int
+
+func (e customError) Error() string {
+ return fmt.Sprintf("error: %d", int(e))
+}
+
+// stringizeWants converts a slice of wanted test output into a format suitable
+// for a test error message.
+func stringizeWants(wants []string) string {
+ s := ""
+ for i, want := range wants {
+ if i > 0 {
+ s += fmt.Sprintf("want%d: %s", i+1, want)
+ } else {
+ s += "want: " + want
+ }
+ }
+ return s
+}
+
+// testFailed returns whether or not a test failed by checking if the result
+// of the test is in the slice of wanted strings.
+func testFailed(result string, wants []string) bool {
+ for _, want := range wants {
+ if result == want {
+ return false
+ }
+ }
+ return true
+}
+
+type sortableStruct struct {
+ x int
+}
+
+func (ss sortableStruct) String() string {
+ return fmt.Sprintf("ss.%d", ss.x)
+}
+
+type unsortableStruct struct {
+ x int
+}
+
+type sortTestCase struct {
+ input []reflect.Value
+ expected []reflect.Value
+}
+
+func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
+ getInterfaces := func(values []reflect.Value) []interface{} {
+ interfaces := []interface{}{}
+ for _, v := range values {
+ interfaces = append(interfaces, v.Interface())
+ }
+ return interfaces
+ }
+
+ for _, test := range tests {
+ spew.SortValues(test.input, cs)
+ // reflect.DeepEqual cannot really make sense of reflect.Value,
+ // probably because of all the pointer tricks. For instance,
+ // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
+ // instead.
+ input := getInterfaces(test.input)
+ expected := getInterfaces(test.expected)
+ if !reflect.DeepEqual(input, expected) {
+ t.Errorf("Sort mismatch:\n %v != %v", input, expected)
+ }
+ }
+}
+
+// TestSortValues ensures the sort functionality for reflect.Value based sorting
+// works as intended.
+func TestSortValues(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	embedA := v(embed{"a"})
+	embedB := v(embed{"b"})
+	embedC := v(embed{"c"})
+	tests := []sortTestCase{
+		// No values.
+		{
+			[]reflect.Value{},
+			[]reflect.Value{},
+		},
+		// Bools.
+		{
+			[]reflect.Value{v(false), v(true), v(false)},
+			[]reflect.Value{v(false), v(false), v(true)},
+		},
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Uints.
+		{
+			[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
+			[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
+		},
+		// Floats.
+		{
+			[]reflect.Value{v(2.0), v(1.0), v(3.0)},
+			[]reflect.Value{v(1.0), v(2.0), v(3.0)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// Array
+		{
+			[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
+			[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
+		},
+		// Uintptrs.
+		{
+			[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
+			[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
+		},
+		// SortableStructs.
+		{
+			// Note: not sorted - DisableMethods is set.
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			// Note: not sorted - SpewKeys is false.
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+		},
+		// Invalid.
+		{
+			[]reflect.Value{embedB, embedA, embedC},
+			[]reflect.Value{embedB, embedA, embedC},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
+	helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
+// based sorting works as intended when using string methods.
+func TestSortValuesWithMethods(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	tests := []sortTestCase{
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// SortableStructs.
+		{
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			// Note: not sorted - SpewKeys is false.
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
+	helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
+// based sorting works as intended when using spew to stringify keys.
+func TestSortValuesWithSpew(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	tests := []sortTestCase{
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// SortableStructs.
+		{
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
+	helpTestSortValues(tests, &cs, t)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use set this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr)
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go
new file mode 100644
index 0000000..4a31a2e
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go
@@ -0,0 +1,1042 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer and double pointer to the
+base test element are also tested to ensure proper indirection across all types.
+
+- Max int8, int16, int32, int64, int
+- Max uint8, uint16, uint32, uint64, uint
+- Boolean true and false
+- Standard complex64 and complex128
+- Array containing standard ints
+- Array containing type with custom formatter on pointer receiver only
+- Array containing interfaces
+- Array containing bytes
+- Slice containing standard float32 values
+- Slice containing type with custom formatter on pointer receiver only
+- Slice containing interfaces
+- Slice containing bytes
+- Nil slice
+- Standard string
+- Nil interface
+- Sub-interface
+- Map with string keys and int vals
+- Map with custom formatter type on pointer receiver only keys and vals
+- Map with interface keys and values
+- Map with nil interface value
+- Struct with primitives
+- Struct that contains another struct
+- Struct that contains custom type with Stringer pointer interface via both
+ exported and unexported fields
+- Struct that contains embedded struct and field to same struct
+- Uintptr to 0 (null pointer)
+- Uintptr address of real variable
+- Unsafe.Pointer to 0 (null pointer)
+- Unsafe.Pointer to address of real variable
+- Nil channel
+- Standard int channel
+- Function with no params and no returns
+- Function with param and no returns
+- Function with multiple params and multiple returns
+- Struct that is circular through self referencing
+- Structs that are circular through cross referencing
+- Structs that are indirectly circular
+- Type that panics in its Stringer interface
+*/
+
+package spew_test
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "unsafe"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// dumpTest is used to describe a test to be performed against the Dump method.
+type dumpTest struct {
+ in interface{}
+ wants []string
+}
+
+// dumpTests houses all of the tests to be performed against the Dump method.
+var dumpTests = make([]dumpTest, 0)
+
+// addDumpTest is a helper method to append the passed input and desired result
+// to dumpTests
+func addDumpTest(in interface{}, wants ...string) {
+ test := dumpTest{in, wants}
+ dumpTests = append(dumpTests, test)
+}
+
+func addIntDumpTests() {
+ // Max int8.
+ v := int8(127)
+ nv := (*int8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "int8"
+ vs := "127"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Max int16.
+ v2 := int16(32767)
+ nv2 := (*int16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "int16"
+ v2s := "32767"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+
+ // Max int32.
+ v3 := int32(2147483647)
+ nv3 := (*int32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "int32"
+ v3s := "2147483647"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")()\n")
+
+ // Max int64.
+ v4 := int64(9223372036854775807)
+ nv4 := (*int64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "int64"
+ v4s := "9223372036854775807"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")()\n")
+
+ // Max int.
+ v5 := int(2147483647)
+ nv5 := (*int)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "int"
+ v5s := "2147483647"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+ addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+ addDumpTest(nv5, "(*"+v5t+")()\n")
+}
+
+func addUintDumpTests() {
+ // Max uint8.
+ v := uint8(255)
+ nv := (*uint8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uint8"
+ vs := "255"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Max uint16.
+ v2 := uint16(65535)
+ nv2 := (*uint16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+
+ // Max uint32.
+ v3 := uint32(4294967295)
+ nv3 := (*uint32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "uint32"
+ v3s := "4294967295"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")()\n")
+
+ // Max uint64.
+ v4 := uint64(18446744073709551615)
+ nv4 := (*uint64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "uint64"
+ v4s := "18446744073709551615"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")()\n")
+
+ // Max uint.
+ v5 := uint(4294967295)
+ nv5 := (*uint)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "uint"
+ v5s := "4294967295"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+ addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+ addDumpTest(nv5, "(*"+v5t+")()\n")
+}
+
+func addBoolDumpTests() {
+ // Boolean true.
+ v := bool(true)
+ nv := (*bool)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "bool"
+ vs := "true"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Boolean false.
+ v2 := bool(false)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "bool"
+ v2s := "false"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addFloatDumpTests() {
+ // Standard float32.
+ v := float32(3.1415)
+ nv := (*float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "float32"
+ vs := "3.1415"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Standard float64.
+ v2 := float64(3.1415926)
+ nv2 := (*float64)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "float64"
+ v2s := "3.1415926"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+}
+
+func addComplexDumpTests() {
+ // Standard complex64.
+ v := complex(float32(6), -2)
+ nv := (*complex64)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "complex64"
+ vs := "(6-2i)"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Standard complex128.
+ v2 := complex(float64(-6), 2)
+ nv2 := (*complex128)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "complex128"
+ v2s := "(-6+2i)"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+}
+
+func addArrayDumpTests() {
+ // Array containing standard ints.
+ v := [3]int{1, 2, 3}
+ vLen := fmt.Sprintf("%d", len(v))
+ vCap := fmt.Sprintf("%d", cap(v))
+ nv := (*[3]int)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "int"
+ vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" +
+ vt + ") 2,\n (" + vt + ") 3\n}"
+ addDumpTest(v, "([3]"+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*[3]"+vt+")()\n")
+
+ // Array containing type with custom formatter on pointer receiver only.
+ v2i0 := pstringer("1")
+ v2i1 := pstringer("2")
+ v2i2 := pstringer("3")
+ v2 := [3]pstringer{v2i0, v2i1, v2i2}
+ v2i0Len := fmt.Sprintf("%d", len(v2i0))
+ v2i1Len := fmt.Sprintf("%d", len(v2i1))
+ v2i2Len := fmt.Sprintf("%d", len(v2i2))
+ v2Len := fmt.Sprintf("%d", len(v2))
+ v2Cap := fmt.Sprintf("%d", cap(v2))
+ nv2 := (*[3]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.pstringer"
+ v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
+ ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t +
+ ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t +
+ ") (len=" + v2i2Len + ") " + "stringer 3\n}"
+ v2s := v2sp
+ if spew.UnsafeDisabled {
+ v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
+ ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" +
+ v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len +
+ ") " + "\"3\"\n}"
+ }
+ addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n")
+ addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n")
+ addDumpTest(nv2, "(*[3]"+v2t+")()\n")
+
+ // Array containing interfaces.
+ v3i0 := "one"
+ v3 := [3]interface{}{v3i0, int(2), uint(3)}
+ v3i0Len := fmt.Sprintf("%d", len(v3i0))
+ v3Len := fmt.Sprintf("%d", len(v3))
+ v3Cap := fmt.Sprintf("%d", cap(v3))
+ nv3 := (*[3]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[3]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
+ "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
+ v3t4 + ") 3\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")()\n")
+
+ // Array containing bytes.
+ v4 := [34]byte{
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32,
+ }
+ v4Len := fmt.Sprintf("%d", len(v4))
+ v4Cap := fmt.Sprintf("%d", cap(v4))
+ nv4 := (*[34]byte)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "[34]uint8"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" +
+ " |............... |\n" +
+ " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" +
+ " |!\"#$%&'()*+,-./0|\n" +
+ " 00000020 31 32 " +
+ " |12|\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")()\n")
+}
+
+func addSliceDumpTests() {
+ // Slice containing standard float32 values.
+ v := []float32{3.14, 6.28, 12.56}
+ vLen := fmt.Sprintf("%d", len(v))
+ vCap := fmt.Sprintf("%d", cap(v))
+ nv := (*[]float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "float32"
+ vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" +
+ vt + ") 6.28,\n (" + vt + ") 12.56\n}"
+ addDumpTest(v, "([]"+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*[]"+vt+")()\n")
+
+ // Slice containing type with custom formatter on pointer receiver only.
+ v2i0 := pstringer("1")
+ v2i1 := pstringer("2")
+ v2i2 := pstringer("3")
+ v2 := []pstringer{v2i0, v2i1, v2i2}
+ v2i0Len := fmt.Sprintf("%d", len(v2i0))
+ v2i1Len := fmt.Sprintf("%d", len(v2i1))
+ v2i2Len := fmt.Sprintf("%d", len(v2i2))
+ v2Len := fmt.Sprintf("%d", len(v2))
+ v2Cap := fmt.Sprintf("%d", cap(v2))
+ nv2 := (*[]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.pstringer"
+ v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
+ v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
+ ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
+ "stringer 3\n}"
+ addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*[]"+v2t+")()\n")
+
+ // Slice containing interfaces.
+ v3i0 := "one"
+ v3 := []interface{}{v3i0, int(2), uint(3), nil}
+ v3i0Len := fmt.Sprintf("%d", len(v3i0))
+ v3Len := fmt.Sprintf("%d", len(v3))
+ v3Cap := fmt.Sprintf("%d", cap(v3))
+ nv3 := (*[]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3t5 := "interface {}"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
+ "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
+ v3t4 + ") 3,\n (" + v3t5 + ") \n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")()\n")
+
+ // Slice containing bytes.
+ v4 := []byte{
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32,
+ }
+ v4Len := fmt.Sprintf("%d", len(v4))
+ v4Cap := fmt.Sprintf("%d", cap(v4))
+ nv4 := (*[]byte)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "[]uint8"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" +
+ " |............... |\n" +
+ " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" +
+ " |!\"#$%&'()*+,-./0|\n" +
+ " 00000020 31 32 " +
+ " |12|\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")()\n")
+
+ // Nil slice.
+ v5 := []int(nil)
+ nv5 := (*[]int)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "[]int"
+ v5s := ""
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+ addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+ addDumpTest(nv5, "(*"+v5t+")()\n")
+}
+
+func addStringDumpTests() {
+ // Standard string.
+ v := "test"
+ vLen := fmt.Sprintf("%d", len(v))
+ nv := (*string)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "string"
+ vs := "(len=" + vLen + ") \"test\""
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+}
+
+func addInterfaceDumpTests() {
+ // Nil interface.
+ var v interface{}
+ nv := (*interface{})(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "interface {}"
+ vs := ""
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Sub-interface.
+ v2 := interface{}(uint16(65535))
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addMapDumpTests() {
+ // Map with string keys and int vals.
+ k := "one"
+ kk := "two"
+ m := map[string]int{k: 1, kk: 2}
+ klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
+ kkLen := fmt.Sprintf("%d", len(kk))
+ mLen := fmt.Sprintf("%d", len(m))
+ nilMap := map[string]int(nil)
+ nm := (*map[string]int)(nil)
+ pm := &m
+ mAddr := fmt.Sprintf("%p", pm)
+ pmAddr := fmt.Sprintf("%p", &pm)
+ mt := "map[string]int"
+ mt1 := "string"
+ mt2 := "int"
+ ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
+ "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
+ ") \"two\": (" + mt2 + ") 2\n}"
+ ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
+ "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
+ ") \"one\": (" + mt2 + ") 1\n}"
+ addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
+ addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
+ "(*"+mt+")("+mAddr+")("+ms2+")\n")
+ addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
+ "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
+ addDumpTest(nm, "(*"+mt+")()\n")
+ addDumpTest(nilMap, "("+mt+") \n")
+
+ // Map with custom formatter type on pointer receiver only keys and vals.
+ k2 := pstringer("one")
+ v2 := pstringer("1")
+ m2 := map[pstringer]pstringer{k2: v2}
+ k2Len := fmt.Sprintf("%d", len(k2))
+ v2Len := fmt.Sprintf("%d", len(v2))
+ m2Len := fmt.Sprintf("%d", len(m2))
+ nilMap2 := map[pstringer]pstringer(nil)
+ nm2 := (*map[pstringer]pstringer)(nil)
+ pm2 := &m2
+ m2Addr := fmt.Sprintf("%p", pm2)
+ pm2Addr := fmt.Sprintf("%p", &pm2)
+ m2t := "map[spew_test.pstringer]spew_test.pstringer"
+ m2t1 := "spew_test.pstringer"
+ m2t2 := "spew_test.pstringer"
+ m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " +
+ "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}"
+ if spew.UnsafeDisabled {
+ m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len +
+ ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len +
+ ") \"1\"\n}"
+ }
+ addDumpTest(m2, "("+m2t+") "+m2s+"\n")
+ addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
+ addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
+ addDumpTest(nm2, "(*"+m2t+")()\n")
+ addDumpTest(nilMap2, "("+m2t+") \n")
+
+ // Map with interface keys and values.
+ k3 := "one"
+ k3Len := fmt.Sprintf("%d", len(k3))
+ m3 := map[interface{}]interface{}{k3: 1}
+ m3Len := fmt.Sprintf("%d", len(m3))
+ nilMap3 := map[interface{}]interface{}(nil)
+ nm3 := (*map[interface{}]interface{})(nil)
+ pm3 := &m3
+ m3Addr := fmt.Sprintf("%p", pm3)
+ pm3Addr := fmt.Sprintf("%p", &pm3)
+ m3t := "map[interface {}]interface {}"
+ m3t1 := "string"
+ m3t2 := "int"
+ m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " +
+ "\"one\": (" + m3t2 + ") 1\n}"
+ addDumpTest(m3, "("+m3t+") "+m3s+"\n")
+ addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
+ addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
+ addDumpTest(nm3, "(*"+m3t+")()\n")
+ addDumpTest(nilMap3, "("+m3t+") \n")
+
+ // Map with nil interface value.
+ k4 := "nil"
+ k4Len := fmt.Sprintf("%d", len(k4))
+ m4 := map[string]interface{}{k4: nil}
+ m4Len := fmt.Sprintf("%d", len(m4))
+ nilMap4 := map[string]interface{}(nil)
+ nm4 := (*map[string]interface{})(nil)
+ pm4 := &m4
+ m4Addr := fmt.Sprintf("%p", pm4)
+ pm4Addr := fmt.Sprintf("%p", &pm4)
+ m4t := "map[string]interface {}"
+ m4t1 := "string"
+ m4t2 := "interface {}"
+ m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" +
+ " \"nil\": (" + m4t2 + ") \n}"
+ addDumpTest(m4, "("+m4t+") "+m4s+"\n")
+ addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
+ addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
+ addDumpTest(nm4, "(*"+m4t+")()\n")
+ addDumpTest(nilMap4, "("+m4t+") \n")
+}
+
+func addStructDumpTests() {
+ // Struct with primitives.
+ type s1 struct {
+ a int8
+ b uint8
+ }
+ v := s1{127, 255}
+ nv := (*s1)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.s1"
+ vt2 := "int8"
+ vt3 := "uint8"
+ vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Struct that contains another struct.
+ type s2 struct {
+ s1 s1
+ b bool
+ }
+ v2 := s2{s1{127, 255}, true}
+ nv2 := (*s2)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.s2"
+ v2t2 := "spew_test.s1"
+ v2t3 := "int8"
+ v2t4 := "uint8"
+ v2t5 := "bool"
+ v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" +
+ v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+
+ // Struct that contains custom type with Stringer pointer interface via both
+ // exported and unexported fields.
+ type s3 struct {
+ s pstringer
+ S pstringer
+ }
+ v3 := s3{"test", "test2"}
+ nv3 := (*s3)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.s3"
+ v3t2 := "spew_test.pstringer"
+ v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 +
+ ") (len=5) stringer test2\n}"
+ v3sp := v3s
+ if spew.UnsafeDisabled {
+ v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
+ v3t2 + ") (len=5) \"test2\"\n}"
+ v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
+ v3t2 + ") (len=5) stringer test2\n}"
+ }
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n")
+ addDumpTest(nv3, "(*"+v3t+")()\n")
+
+ // Struct that contains embedded struct and field to same struct.
+ e := embed{"embedstr"}
+ eLen := fmt.Sprintf("%d", len("embedstr"))
+ v4 := embedwrap{embed: &e, e: &e}
+ nv4 := (*embedwrap)(nil)
+ pv4 := &v4
+ eAddr := fmt.Sprintf("%p", &e)
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "spew_test.embedwrap"
+ v4t2 := "spew_test.embed"
+ v4t3 := "string"
+ v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 +
+ ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 +
+ ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" +
+ " \"embedstr\"\n })\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")()\n")
+}
+
+func addUintptrDumpTests() {
+ // Null pointer.
+ v := uintptr(0)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uintptr"
+ vs := ""
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+
+ // Address of real variable.
+ i := 1
+ v2 := uintptr(unsafe.Pointer(&i))
+ nv2 := (*uintptr)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uintptr"
+ v2s := fmt.Sprintf("%p", &i)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+}
+
+func addUnsafePointerDumpTests() {
+ // Null pointer.
+ v := unsafe.Pointer(nil)
+ nv := (*unsafe.Pointer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "unsafe.Pointer"
+ vs := ""
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Address of real variable.
+ i := 1
+ v2 := unsafe.Pointer(&i)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "unsafe.Pointer"
+ v2s := fmt.Sprintf("%p", &i)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+}
+
+func addChanDumpTests() {
+ // Nil channel.
+ var v chan int
+ pv := &v
+ nv := (*chan int)(nil)
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "chan int"
+ vs := ""
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Real channel.
+ v2 := make(chan int)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "chan int"
+ v2s := fmt.Sprintf("%p", v2)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addFuncDumpTests() {
+ // Function with no params and no returns.
+ v := addIntDumpTests
+ nv := (*func())(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "func()"
+ vs := fmt.Sprintf("%p", v)
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+
+ // Function with param and no returns.
+ v2 := TestDump
+ nv2 := (*func(*testing.T))(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "func(*testing.T)"
+ v2s := fmt.Sprintf("%p", v2)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")()\n")
+
+ // Function with multiple params and multiple returns.
+ var v3 = func(i int, s string) (b bool, err error) {
+ return true, nil
+ }
+ nv3 := (*func(int, string) (bool, error))(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "func(int, string) (bool, error)"
+ v3s := fmt.Sprintf("%p", v3)
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")()\n")
+}
+
+func addCircularDumpTests() {
+ // Struct that is circular through self referencing.
+ type circular struct {
+ c *circular
+ }
+ v := circular{nil}
+ v.c = &v
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.circular"
+ vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" +
+ vAddr + ")()\n })\n}"
+ vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
+
+ // Structs that are circular through cross referencing.
+ v2 := xref1{nil}
+ ts2 := xref2{&v2}
+ v2.ps2 = &ts2
+ pv2 := &v2
+ ts2Addr := fmt.Sprintf("%p", &ts2)
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.xref1"
+ v2t2 := "spew_test.xref2"
+ v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
+ ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr +
+ ")()\n })\n })\n}"
+ v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
+ ")(" + v2Addr + ")()\n })\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
+
+ // Structs that are indirectly circular.
+ v3 := indirCir1{nil}
+ tic2 := indirCir2{nil}
+ tic3 := indirCir3{&v3}
+ tic2.ps3 = &tic3
+ v3.ps2 = &tic2
+ pv3 := &v3
+ tic2Addr := fmt.Sprintf("%p", &tic2)
+ tic3Addr := fmt.Sprintf("%p", &tic3)
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.indirCir1"
+ v3t2 := "spew_test.indirCir2"
+ v3t3 := "spew_test.indirCir3"
+ v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
+ ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
+ ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr +
+ ")()\n })\n })\n })\n}"
+ v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
+ ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
+ ")()\n })\n })\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
+}
+
+func addPanicDumpTests() {
+ // Type that panics in its Stringer interface.
+ v := panicer(127)
+ nv := (*panicer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.panicer"
+ vs := "(PANIC=test panic)127"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+}
+
+func addErrorDumpTests() {
+ // Type that has a custom Error interface.
+ v := customError(127)
+ nv := (*customError)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.customError"
+ vs := "error: 127"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")()\n")
+}
+
+// TestDump executes all of the tests described by dumpTests.
+func TestDump(t *testing.T) {
+ // Setup tests.
+ addIntDumpTests()
+ addUintDumpTests()
+ addBoolDumpTests()
+ addFloatDumpTests()
+ addComplexDumpTests()
+ addArrayDumpTests()
+ addSliceDumpTests()
+ addStringDumpTests()
+ addInterfaceDumpTests()
+ addMapDumpTests()
+ addStructDumpTests()
+ addUintptrDumpTests()
+ addUnsafePointerDumpTests()
+ addChanDumpTests()
+ addFuncDumpTests()
+ addCircularDumpTests()
+ addPanicDumpTests()
+ addErrorDumpTests()
+ addCgoDumpTests()
+
+ t.Logf("Running %d tests", len(dumpTests))
+ for i, test := range dumpTests {
+ buf := new(bytes.Buffer)
+ spew.Fdump(buf, test.in)
+ s := buf.String()
+ if testFailed(s, test.wants) {
+ t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants))
+ continue
+ }
+ }
+}
+
+func TestDumpSortedKeys(t *testing.T) {
+ cfg := spew.ConfigState{SortKeys: true}
+ s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"})
+ expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " +
+ "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " +
+ "(len=1) \"3\"\n" +
+ "}\n"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2})
+ expected = "(map[spew_test.stringer]int) (len=3) {\n" +
+ "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" +
+ "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" +
+ "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" +
+ "}\n"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
+ expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
+ "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" +
+ "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" +
+ "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" +
+ "}\n"
+ if spew.UnsafeDisabled {
+ expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
+ "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" +
+ "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" +
+ "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" +
+ "}\n"
+ }
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
+ expected = "(map[spew_test.customError]int) (len=3) {\n" +
+ "(spew_test.customError) error: 1: (int) 1,\n" +
+ "(spew_test.customError) error: 2: (int) 2,\n" +
+ "(spew_test.customError) error: 3: (int) 3\n" +
+ "}\n"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
new file mode 100644
index 0000000..108baa5
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2013-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line. This means the cgo tests are only added (and hence run) when
+// specifically requested. This configuration is used because spew itself
+// does not require cgo to run even though it does handle certain cgo types
+// specially. Rather than forcing all clients to require cgo and an external
+// C compiler just to run the tests, this scheme makes them optional.
+// +build cgo,testcgo
+
+package spew_test
+
+import (
+ "fmt"
+
+ "github.com/davecgh/go-spew/spew/testdata"
+)
+
+func addCgoDumpTests() {
+ // C char pointer.
+ v := testdata.GetCgoCharPointer()
+ nv := testdata.GetCgoNullCharPointer()
+ pv := &v
+ vcAddr := fmt.Sprintf("%p", v)
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "*testdata._Ctype_char"
+ vs := "116"
+ addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
+ addDumpTest(nv, "("+vt+")()\n")
+
+ // C char array.
+ v2, v2l, v2c := testdata.GetCgoCharArray()
+ v2Len := fmt.Sprintf("%d", v2l)
+ v2Cap := fmt.Sprintf("%d", v2c)
+ v2t := "[6]testdata._Ctype_char"
+ v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
+ "{\n 00000000 74 65 73 74 32 00 " +
+ " |test2.|\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+
+ // C unsigned char array.
+ v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
+ v3Len := fmt.Sprintf("%d", v3l)
+ v3Cap := fmt.Sprintf("%d", v3c)
+ v3t := "[6]testdata._Ctype_unsignedchar"
+ v3t2 := "[6]testdata._Ctype_uchar"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
+ "{\n 00000000 74 65 73 74 33 00 " +
+ " |test3.|\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
+
+ // C signed char array.
+ v4, v4l, v4c := testdata.GetCgoSignedCharArray()
+ v4Len := fmt.Sprintf("%d", v4l)
+ v4Cap := fmt.Sprintf("%d", v4c)
+ v4t := "[6]testdata._Ctype_schar"
+ v4t2 := "testdata._Ctype_schar"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
+ ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
+ ") 0\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+
+ // C uint8_t array.
+ v5, v5l, v5c := testdata.GetCgoUint8tArray()
+ v5Len := fmt.Sprintf("%d", v5l)
+ v5Cap := fmt.Sprintf("%d", v5c)
+ v5t := "[6]testdata._Ctype_uint8_t"
+ v5t2 := "[6]testdata._Ctype_uchar"
+ v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
+ "{\n 00000000 74 65 73 74 35 00 " +
+ " |test5.|\n}"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n", "("+v5t2+") "+v5s+"\n")
+
+ // C typedefed unsigned char array.
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
+ v6Len := fmt.Sprintf("%d", v6l)
+ v6Cap := fmt.Sprintf("%d", v6c)
+ v6t := "[6]testdata._Ctype_custom_uchar_t"
+ v6t2 := "[6]testdata._Ctype_uchar"
+ v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
+ "{\n 00000000 74 65 73 74 36 00 " +
+ " |test6.|\n}"
+ addDumpTest(v6, "("+v6t+") "+v6s+"\n", "("+v6t2+") "+v6s+"\n")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
new file mode 100644
index 0000000..52a0971
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2013 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when either cgo is not supported or "-tags testcgo" is not added to the go
+// test command line. This file intentionally does not set up any cgo tests in
+// this scenario.
+// +build !cgo !testcgo
+
+package spew_test
+
+func addCgoDumpTests() {
+ // Don't add any tests for cgo since this file is only compiled when
+ // there should not be any cgo tests.
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go
new file mode 100644
index 0000000..c6ec8c6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+ "fmt"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+type Flag int
+
+const (
+ flagOne Flag = iota
+ flagTwo
+)
+
+var flagStrings = map[Flag]string{
+ flagOne: "flagOne",
+ flagTwo: "flagTwo",
+}
+
+func (f Flag) String() string {
+ if s, ok := flagStrings[f]; ok {
+ return s
+ }
+ return fmt.Sprintf("Unknown flag (%d)", int(f))
+}
+
+type Bar struct {
+ data uintptr
+}
+
+type Foo struct {
+ unexportedField Bar
+ ExportedField map[interface{}]interface{}
+}
+
+// This example demonstrates how to use Dump to dump variables to stdout.
+func ExampleDump() {
+ // The following package level declarations are assumed for this example:
+ /*
+ type Flag int
+
+ const (
+ flagOne Flag = iota
+ flagTwo
+ )
+
+ var flagStrings = map[Flag]string{
+ flagOne: "flagOne",
+ flagTwo: "flagTwo",
+ }
+
+ func (f Flag) String() string {
+ if s, ok := flagStrings[f]; ok {
+ return s
+ }
+ return fmt.Sprintf("Unknown flag (%d)", int(f))
+ }
+
+ type Bar struct {
+ data uintptr
+ }
+
+ type Foo struct {
+ unexportedField Bar
+ ExportedField map[interface{}]interface{}
+ }
+ */
+
+ // Setup some sample data structures for the example.
+ bar := Bar{uintptr(0)}
+ s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+ f := Flag(5)
+ b := []byte{
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32,
+ }
+
+ // Dump!
+ spew.Dump(s1, f, b)
+
+ // Output:
+ // (spew_test.Foo) {
+ // unexportedField: (spew_test.Bar) {
+ // data: (uintptr)
+ // },
+ // ExportedField: (map[interface {}]interface {}) (len=1) {
+ // (string) (len=3) "one": (bool) true
+ // }
+ // }
+ // (spew_test.Flag) Unknown flag (5)
+ // ([]uint8) (len=34 cap=34) {
+ // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ // 00000020 31 32 |12|
+ // }
+ //
+}
+
+// This example demonstrates how to use Printf to display a variable with a
+// format string and inline formatting.
+func ExamplePrintf() {
+ // Create a double pointer to a uint 8.
+ ui8 := uint8(5)
+ pui8 := &ui8
+ ppui8 := &pui8
+
+ // Create a circular data type.
+ type circular struct {
+ ui8 uint8
+ c *circular
+ }
+ c := circular{ui8: 1}
+ c.c = &c
+
+ // Print!
+ spew.Printf("ppui8: %v\n", ppui8)
+ spew.Printf("circular: %v\n", c)
+
+ // Output:
+ // ppui8: <**>5
+ // circular: {1 <*>{1 <*>}}
+}
+
+// This example demonstrates how to use a ConfigState.
+func ExampleConfigState() {
+ // Modify the indent level of the ConfigState only. The global
+ // configuration is not modified.
+ scs := spew.ConfigState{Indent: "\t"}
+
+ // Output using the ConfigState instance.
+ v := map[string]int{"one": 1}
+ scs.Printf("v: %v\n", v)
+ scs.Dump(v)
+
+ // Output:
+ // v: map[one:1]
+ // (map[string]int) (len=1) {
+ // (string) (len=3) "one": (int) 1
+ // }
+}
+
+// This example demonstrates how to use ConfigState.Dump to dump variables to
+// stdout
+func ExampleConfigState_Dump() {
+ // See the top-level Dump example for details on the types used in this
+ // example.
+
+ // Create two ConfigState instances with different indentation.
+ scs := spew.ConfigState{Indent: "\t"}
+ scs2 := spew.ConfigState{Indent: " "}
+
+ // Setup some sample data structures for the example.
+ bar := Bar{uintptr(0)}
+ s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+
+ // Dump using the ConfigState instances.
+ scs.Dump(s1)
+ scs2.Dump(s1)
+
+ // Output:
+ // (spew_test.Foo) {
+ // unexportedField: (spew_test.Bar) {
+ // data: (uintptr)
+ // },
+ // ExportedField: (map[interface {}]interface {}) (len=1) {
+ // (string) (len=3) "one": (bool) true
+ // }
+ // }
+ // (spew_test.Foo) {
+ // unexportedField: (spew_test.Bar) {
+ // data: (uintptr)
+ // },
+ // ExportedField: (map[interface {}]interface {}) (len=1) {
+ // (string) (len=3) "one": (bool) true
+ // }
+ // }
+ //
+}
+
+// This example demonstrates how to use ConfigState.Printf to display a variable
+// with a format string and inline formatting.
+func ExampleConfigState_Printf() {
+ // See the top-level Dump example for details on the types used in this
+ // example.
+
+ // Create two ConfigState instances and modify the method handling of the
+ // first ConfigState only.
+ scs := spew.NewDefaultConfig()
+ scs2 := spew.NewDefaultConfig()
+ scs.DisableMethods = true
+
+ // Alternatively
+ // scs := spew.ConfigState{Indent: " ", DisableMethods: true}
+ // scs2 := spew.ConfigState{Indent: " "}
+
+ // This is of type Flag which implements a Stringer and has raw value 1.
+ f := flagTwo
+
+ // Dump using the ConfigState instances.
+ scs.Printf("f: %v\n", f)
+ scs2.Printf("f: %v\n", f)
+
+ // Output:
+ // f: 1
+ // f: flagTwo
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by derferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go
new file mode 100644
index 0000000..87ee965
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go
@@ -0,0 +1,1558 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer and double pointer to the
+base test element are also tested to ensure proper indirection across all types.
+
+- Max int8, int16, int32, int64, int
+- Max uint8, uint16, uint32, uint64, uint
+- Boolean true and false
+- Standard complex64 and complex128
+- Array containing standard ints
+- Array containing type with custom formatter on pointer receiver only
+- Array containing interfaces
+- Slice containing standard float32 values
+- Slice containing type with custom formatter on pointer receiver only
+- Slice containing interfaces
+- Nil slice
+- Standard string
+- Nil interface
+- Sub-interface
+- Map with string keys and int vals
+- Map with custom formatter type on pointer receiver only keys and vals
+- Map with interface keys and values
+- Map with nil interface value
+- Struct with primitives
+- Struct that contains another struct
+- Struct that contains custom type with Stringer pointer interface via both
+ exported and unexported fields
+- Struct that contains embedded struct and field to same struct
+- Uintptr to 0 (null pointer)
+- Uintptr address of real variable
+- Unsafe.Pointer to 0 (null pointer)
+- Unsafe.Pointer to address of real variable
+- Nil channel
+- Standard int channel
+- Function with no params and no returns
+- Function with param and no returns
+- Function with multiple params and multiple returns
+- Struct that is circular through self referencing
+- Structs that are circular through cross referencing
+- Structs that are indirectly circular
+- Type that panics in its Stringer interface
+- Type that has a custom Error interface
+- %x passthrough with uint
+- %#x passthrough with uint
+- %f passthrough with precision
+- %f passthrough with width and precision
+- %d passthrough with width
+- %q passthrough with string
+*/
+
+package spew_test
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "unsafe"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// formatterTest is used to describe a test to be performed against NewFormatter.
+type formatterTest struct {
+ format string
+ in interface{}
+ wants []string
+}
+
+// formatterTests houses all of the tests to be performed against NewFormatter.
+var formatterTests = make([]formatterTest, 0)
+
+// addFormatterTest is a helper method to append the passed input and desired
+// result to formatterTests.
+func addFormatterTest(format string, in interface{}, wants ...string) {
+ test := formatterTest{format, in, wants}
+ formatterTests = append(formatterTests, test)
+}
+
+func addIntFormatterTests() {
+ // Max int8.
+ v := int8(127)
+ nv := (*int8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "int8"
+ vs := "127"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Max int16.
+ v2 := int16(32767)
+ nv2 := (*int16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "int16"
+ v2s := "32767"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+
+ // Max int32.
+ v3 := int32(2147483647)
+ nv3 := (*int32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "int32"
+ v3s := "2147483647"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%v", nv3, "")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+
+ // Max int64.
+ v4 := int64(9223372036854775807)
+ nv4 := (*int64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "int64"
+ v4s := "9223372036854775807"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%v", nv4, "")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"")
+
+ // Max int.
+ v5 := int(2147483647)
+ nv5 := (*int)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "int"
+ v5s := "2147483647"
+ addFormatterTest("%v", v5, v5s)
+ addFormatterTest("%v", pv5, "<*>"+v5s)
+ addFormatterTest("%v", &pv5, "<**>"+v5s)
+ addFormatterTest("%v", nv5, "")
+ addFormatterTest("%+v", v5, v5s)
+ addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
+ addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%+v", nv5, "")
+ addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
+ addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
+ addFormatterTest("%#v", nv5, "(*"+v5t+")"+"")
+ addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
+ addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"")
+}
+
+func addUintFormatterTests() {
+ // Max uint8.
+ v := uint8(255)
+ nv := (*uint8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uint8"
+ vs := "255"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Max uint16.
+ v2 := uint16(65535)
+ nv2 := (*uint16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+
+ // Max uint32.
+ v3 := uint32(4294967295)
+ nv3 := (*uint32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "uint32"
+ v3s := "4294967295"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%v", nv3, "")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+
+ // Max uint64.
+ v4 := uint64(18446744073709551615)
+ nv4 := (*uint64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "uint64"
+ v4s := "18446744073709551615"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%v", nv4, "")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"")
+
+ // Max uint.
+ v5 := uint(4294967295)
+ nv5 := (*uint)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "uint"
+ v5s := "4294967295"
+ addFormatterTest("%v", v5, v5s)
+ addFormatterTest("%v", pv5, "<*>"+v5s)
+ addFormatterTest("%v", &pv5, "<**>"+v5s)
+ addFormatterTest("%v", nv5, "")
+ addFormatterTest("%+v", v5, v5s)
+ addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
+ addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%+v", nv5, "")
+ addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
+ addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
+ addFormatterTest("%#v", nv5, "(*"+v5t+")"+"")
+ addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
+ addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%#v", nv5, "(*"+v5t+")"+"")
+}
+
+func addBoolFormatterTests() {
+ // Boolean true.
+ v := bool(true)
+ nv := (*bool)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "bool"
+ vs := "true"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Boolean false.
+ v2 := bool(false)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "bool"
+ v2s := "false"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addFloatFormatterTests() {
+ // Standard float32.
+ v := float32(3.1415)
+ nv := (*float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "float32"
+ vs := "3.1415"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Standard float64.
+ v2 := float64(3.1415926)
+ nv2 := (*float64)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "float64"
+ v2s := "3.1415926"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+}
+
+func addComplexFormatterTests() {
+ // Standard complex64.
+ v := complex(float32(6), -2)
+ nv := (*complex64)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "complex64"
+ vs := "(6-2i)"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Standard complex128.
+ v2 := complex(float64(-6), 2)
+ nv2 := (*complex128)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "complex128"
+ v2s := "(-6+2i)"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+}
+
+func addArrayFormatterTests() {
+ // Array containing standard ints.
+ v := [3]int{1, 2, 3}
+ nv := (*[3]int)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "[3]int"
+ vs := "[1 2 3]"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Array containing type with custom formatter on pointer receiver only.
+ v2 := [3]pstringer{"1", "2", "3"}
+ nv2 := (*[3]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "[3]spew_test.pstringer"
+ v2sp := "[stringer 1 stringer 2 stringer 3]"
+ v2s := v2sp
+ if spew.UnsafeDisabled {
+ v2s = "[1 2 3]"
+ }
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2sp)
+ addFormatterTest("%v", &pv2, "<**>"+v2sp)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+
+ // Array containing interfaces.
+ v3 := [3]interface{}{"one", int(2), uint(3)}
+ nv3 := (*[3]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[3]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3s := "[one 2 3]"
+ v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"")
+}
+
+func addSliceFormatterTests() {
+ // Slice containing standard float32 values.
+ v := []float32{3.14, 6.28, 12.56}
+ nv := (*[]float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "[]float32"
+ vs := "[3.14 6.28 12.56]"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Slice containing type with custom formatter on pointer receiver only.
+ v2 := []pstringer{"1", "2", "3"}
+ nv2 := (*[]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "[]spew_test.pstringer"
+ v2s := "[stringer 1 stringer 2 stringer 3]"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+
+ // Slice containing interfaces.
+ v3 := []interface{}{"one", int(2), uint(3), nil}
+ nv3 := (*[]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3t5 := "interface {}"
+ v3s := "[one 2 3 ]"
+ v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 +
+ ")]"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"")
+
+ // Nil slice.
+ var v4 []int
+ nv4 := (*[]int)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "[]int"
+ v4s := ""
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"")
+}
+
+func addStringFormatterTests() {
+ // Standard string.
+ v := "test"
+ nv := (*string)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "string"
+ vs := "test"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+}
+
+func addInterfaceFormatterTests() {
+ // Nil interface.
+ var v interface{}
+ nv := (*interface{})(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "interface {}"
+ vs := ""
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Sub-interface.
+ v2 := interface{}(uint16(65535))
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addMapFormatterTests() {
+ // Map with string keys and int vals.
+ v := map[string]int{"one": 1, "two": 2}
+ nilMap := map[string]int(nil)
+ nv := (*map[string]int)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "map[string]int"
+ vs := "map[one:1 two:2]"
+ vs2 := "map[two:2 one:1]"
+ addFormatterTest("%v", v, vs, vs2)
+ addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
+ addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
+ addFormatterTest("%+v", nilMap, "")
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs, vs2)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
+ "<**>("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%+v", nilMap, "")
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
+ addFormatterTest("%#v", nilMap, "("+vt+")"+"")
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
+ "(*"+vt+")("+vAddr+")"+vs2)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
+ "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%#+v", nilMap, "("+vt+")"+"")
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Map with custom formatter type on pointer receiver only keys and vals.
+ v2 := map[pstringer]pstringer{"one": "1"}
+ nv2 := (*map[pstringer]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "map[spew_test.pstringer]spew_test.pstringer"
+ v2s := "map[stringer one:stringer 1]"
+ if spew.UnsafeDisabled {
+ v2s = "map[one:1]"
+ }
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+
+ // Map with interface keys and values.
+ v3 := map[interface{}]interface{}{"one": 1}
+ nv3 := (*map[interface{}]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "map[interface {}]interface {}"
+ v3t1 := "string"
+ v3t2 := "int"
+ v3s := "map[one:1]"
+ v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"")
+
+ // Map with nil interface value
+ v4 := map[string]interface{}{"nil": nil}
+ nv4 := (*map[string]interface{})(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "map[string]interface {}"
+ v4t1 := "interface {}"
+ v4s := "map[nil:]"
+ v4s2 := "map[nil:(" + v4t1 + ")]"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s2)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"")
+}
+
+func addStructFormatterTests() {
+ // Struct with primitives.
+ type s1 struct {
+ a int8
+ b uint8
+ }
+ v := s1{127, 255}
+ nv := (*s1)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.s1"
+ vt2 := "int8"
+ vt3 := "uint8"
+ vs := "{127 255}"
+ vs2 := "{a:127 b:255}"
+ vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs2)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs3)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs3)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs3)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Struct that contains another struct.
+ type s2 struct {
+ s1 s1
+ b bool
+ }
+ v2 := s2{s1{127, 255}, true}
+ nv2 := (*s2)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.s2"
+ v2t2 := "spew_test.s1"
+ v2t3 := "int8"
+ v2t4 := "uint8"
+ v2t5 := "bool"
+ v2s := "{{127 255} true}"
+ v2s2 := "{s1:{a:127 b:255} b:true}"
+ v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" +
+ v2t5 + ")true}"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%+v", v2, v2s2)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2)
+ addFormatterTest("%+v", nv2, "")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s3)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
+
+ // Struct that contains custom type with Stringer pointer interface via both
+ // exported and unexported fields.
+ type s3 struct {
+ s pstringer
+ S pstringer
+ }
+ v3 := s3{"test", "test2"}
+ nv3 := (*s3)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.s3"
+ v3t2 := "spew_test.pstringer"
+ v3s := "{stringer test stringer test2}"
+ v3sp := v3s
+ v3s2 := "{s:stringer test S:stringer test2}"
+ v3s2p := v3s2
+ v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}"
+ v3s3p := v3s3
+ if spew.UnsafeDisabled {
+ v3s = "{test test2}"
+ v3sp = "{test stringer test2}"
+ v3s2 = "{s:test S:test2}"
+ v3s2p = "{s:test S:stringer test2}"
+ v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}"
+ v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}"
+ }
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3sp)
+ addFormatterTest("%v", &pv3, "<**>"+v3sp)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%+v", v3, v3s2)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p)
+ addFormatterTest("%+v", nv3, "")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s3)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"")
+
+ // Struct that contains embedded struct and field to same struct.
+ e := embed{"embedstr"}
+ v4 := embedwrap{embed: &e, e: &e}
+ nv4 := (*embedwrap)(nil)
+ pv4 := &v4
+ eAddr := fmt.Sprintf("%p", &e)
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "spew_test.embedwrap"
+ v4t2 := "spew_test.embed"
+ v4t3 := "string"
+ v4s := "{<*>{embedstr} <*>{embedstr}}"
+ v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr +
+ "){a:embedstr}}"
+ v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 +
+ "){a:(" + v4t3 + ")embedstr}}"
+ v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 +
+ ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%+v", v4, v4s2)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2)
+ addFormatterTest("%+v", nv4, "")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s3)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"")
+}
+
+func addUintptrFormatterTests() {
+ // Null pointer.
+ v := uintptr(0)
+ nv := (*uintptr)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uintptr"
+ vs := ""
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
+
+ // Address of real variable.
+ i := 1
+ v2 := uintptr(unsafe.Pointer(&i))
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uintptr"
+ v2s := fmt.Sprintf("%p", &i)
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addUnsafePointerFormatterTests() {
+ // Null pointer.
+ v := unsafe.Pointer(nil)
+ nv := (*unsafe.Pointer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "unsafe.Pointer"
+ vs := "