diff --git a/CHANGELOG.md b/CHANGELOG.md index be970025a..7b68d9696 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,12 @@ # Changelog -## [v0.4.0](https://github.com/docker/notary/releases/tag/v0.4.0) 8/11/2016 +## [v0.4.1](https://github.com/docker/notary/releases/tag/v0.4.1) 9/27/2016 ++ Preliminary Windows support for notary client [#970](https://github.com/docker/notary/pull/970) ++ Output message to CLI when repo changes have been successfully published [#974](https://github.com/docker/notary/pull/974) ++ Improved error messages for client authentication errors and for the witness command [#972](https://github.com/docker/notary/pull/972) ++ Support for finding keys that are anywhere in the notary directory's "private" directory, not just under "private/root_keys" or "private/tuf_keys" [#981](https://github.com/docker/notary/pull/981) + +## [v0.4.0](https://github.com/docker/notary/releases/tag/v0.4.0) 9/21/2016 + Server-managed key rotations [#889](https://github.com/docker/notary/pull/889) + Remove `timestamp_keys` table, which stored redundant information [#889](https://github.com/docker/notary/pull/889) + Introduce `notary delete` command to delete local and/or remote repo data [#895](https://github.com/docker/notary/pull/895) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 649c48990..7089a6a95 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -148,88 +148,88 @@ }, { "ImportPath": "github.com/docker/distribution", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/context", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/digest", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": 
"c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/health", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/api/errcode", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/api/v2", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/auth", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/auth/htpasswd", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/auth/silly", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/auth/token", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": 
"github.com/docker/distribution/registry/client", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/client/auth", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/client/transport", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/storage/cache", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/registry/storage/cache/memory", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/distribution/uuid", - "Comment": "v2.2.1-20-gc56d49b", - "Rev": "c56d49b111aea675a81d411c2db1acfac6179de9" + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" }, { "ImportPath": "github.com/docker/docker/pkg/system", @@ -928,6 +928,41 @@ { "ImportPath": "golang.org/x/net/lex/httplex", "Rev": "6a513affb38dc9788b449d59ffed099b8de18fa0" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/github.com/Sirupsen/logrus", + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/github.com/gorilla/mux", + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/golang.org/x/net/context", + 
"Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/github.com/gorilla/context", + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt", + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish", + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" + }, + { + "ImportPath": "github.com/docker/distribution/vendor/github.com/docker/libtrust", + "Comment": "v2.5.1", + "Rev": "12acdf0a6c1e56d965ac6eb395d2bce687bf22fc" } ] } diff --git a/Makefile b/Makefile index e415c053a..c5fcbcc92 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ endif CTIMEVAR=-X $(NOTARY_PKG)/version.GitCommit=$(GITCOMMIT) -X $(NOTARY_PKG)/version.NotaryVersion=$(NOTARY_VERSION) GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)" GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static" -GOOSES = darwin linux +GOOSES = darwin linux windows NOTARY_BUILDTAGS ?= pkcs11 NOTARYDIR := /go/src/github.com/docker/notary diff --git a/NOTARY_VERSION b/NOTARY_VERSION index bd73f4707..267577d47 100644 --- a/NOTARY_VERSION +++ b/NOTARY_VERSION @@ -1 +1 @@ -0.4 +0.4.1 diff --git a/buildscripts/circle_parallelism.sh b/buildscripts/circle_parallelism.sh index cc07fdc2d..961d90a6a 100755 --- a/buildscripts/circle_parallelism.sh +++ b/buildscripts/circle_parallelism.sh @@ -8,6 +8,7 @@ case $CIRCLE_NODE_INDEX in ;; 2) SKIPENVCHECK=1 make TESTDB=mysql testdb SKIPENVCHECK=1 make TESTDB=mysql integration + SKIPENVCHECK=1 make cross # just trying not to exceed 5 builders ;; 3) SKIPENVCHECK=1 make TESTDB=rethink testdb SKIPENVCHECK=1 make TESTDB=rethink integration diff --git a/buildscripts/covertest.py b/buildscripts/covertest.py index ee2f22e67..99fec4101 100755 --- a/buildscripts/covertest.py +++ 
b/buildscripts/covertest.py @@ -32,7 +32,7 @@ def get_coverprofile_filename(pkg, buildtags): buildtags = "." + buildtags.replace(' ', '.') return pkg.replace('/', '-').replace(' ', '_') + buildtags + ".coverage.txt" -def run_test_with_coverage(buildtags="", coverdir=".cover", pkgs=None, opts="", covermode="count"): +def run_test_with_coverage(buildtags="", coverdir=".cover", pkgs=None, opts="", covermode="atomic"): """ Run go test with coverage over the the given packages, with the following options """ diff --git a/buildscripts/cross.sh b/buildscripts/cross.sh index 840a751aa..3e257da33 100755 --- a/buildscripts/cross.sh +++ b/buildscripts/cross.sh @@ -7,15 +7,10 @@ GOARCH="amd64" -if [[ "${NOTARY_BUILDTAGS}" == *pkcs11* ]]; then - export CGO_ENABLED=1 -else - export CGO_ENABLED=0 -fi - - for os in "$@"; do export GOOS="${os}" + BUILDTAGS="${NOTARY_BUILDTAGS}" + OUTFILE=notary if [[ "${GOOS}" == "darwin" ]]; then export CC="o64-clang" @@ -24,18 +19,29 @@ for os in "$@"; do # darwin binaries can't be compiled to be completely static with the -static flag LDFLAGS="-s" else + # no building with Cgo. Also no building with pkcs11 + if [[ "${GOOS}" == "windows" ]]; then + BUILDTAGS= + OUTFILE=notary.exe + fi unset CC unset CXX LDFLAGS="-extldflags -static" fi + if [[ "${BUILDTAGS}" == *pkcs11* ]]; then + export CGO_ENABLED=1 + else + export CGO_ENABLED=0 + fi + mkdir -p "${NOTARYDIR}/cross/${GOOS}/${GOARCH}"; set -x; go build \ - -o "${NOTARYDIR}/cross/${GOOS}/${GOARCH}/notary" \ + -o "${NOTARYDIR}/cross/${GOOS}/${GOARCH}/${OUTFILE}" \ -a \ - -tags "${NOTARY_BUILDTAGS}" \ + -tags "${BUILDTAGS}" \ -ldflags "-w ${CTIMEVAR} ${LDFLAGS}" \ ./cmd/notary; set +x; diff --git a/buildscripts/dockertest.py b/buildscripts/dockertest.py index ded47e08e..2471bb5f9 100644 --- a/buildscripts/dockertest.py +++ b/buildscripts/dockertest.py @@ -1,5 +1,19 @@ """ Script that automates trusted pull/pushes on different docker versions. 
+ +Usage: python buildscripts/dockertest.py + +- assumes that this is run from the root notary directory +- assumes that bin/client already exists +- assumes you are logged in with docker + +- environment variables to provide: + - DEBUG=true - produce debug output + - DOCKER_CONTENT_TRUST_SERVER= test against a non-local + notary server + - NOTARY_SERVER_USERNAME= login creds username to notary server + - NOTARY_SERVER_PASSPHRASE= login creds password to notary server + - DOCKER_USERNAME= docker hub login username """ from __future__ import print_function @@ -30,9 +44,12 @@ DOWNLOAD_DOCKERS = { "1.10": ("https://get.docker.com", "docker-1.10.3"), "1.11": ("https://get.docker.com", "docker-1.11.2"), - "1.12": ("https://get.docker.com", "docker-1.12.0"), + "1.12": ("https://get.docker.com", "docker-1.12.1"), } +NOTARY_VERSION = "0.4.1" # only version that will work with docker < 1.13 +NOTARY_BINARY = "bin/notary" + # please replace with private registry if you want to test against a private # registry REGISTRY = "docker.io" @@ -47,25 +64,32 @@ # Assumes default docker config dir DEFAULT_DOCKER_CONFIG = os.path.expanduser("~/.docker") -# Assumes the trust server will be run using compose if DOCKER_CONTENT_TRUST_SERVER is not specified +# Assumes the trust server will be run using compose if +# DOCKER_CONTENT_TRUST_SERVER is not specified DEFAULT_NOTARY_SERVER = "https://notary-server:4443" # please enter a custom trust server location if you do not wish to use a local -# docker-compose instantiation. If testing against Docker Hub's notary server or -# another trust server, please also ensure that this script does not pick up incorrect TLS -# certificates from ~/.notary/config.json by default -TRUST_SERVER = os.getenv('DOCKER_CONTENT_TRUST_SERVER', DEFAULT_NOTARY_SERVER) +# docker-compose instantiation. 
If testing against Docker Hub's notary server +# or another trust server, please also ensure that this script does not pick up +# incorrect TLS certificates from ~/.notary/config.json by default +TRUST_SERVER = os.getenv('DOCKER_CONTENT_TRUST_SERVER', DEFAULT_NOTARY_SERVER) + # Assumes the test will be run with `python misc/dockertest.py` from # the root of the notary repo after binaries are built # also overrides the notary server location if need be if TRUST_SERVER != DEFAULT_NOTARY_SERVER: - NOTARY_CLIENT = "bin/notary -s {0}".format(TRUST_SERVER) + NOTARY_CLIENT = "{client} -s {server}".format( + client=NOTARY_BINARY, server=TRUST_SERVER) else: - NOTARY_CLIENT = "bin/notary -c cmd/notary/config.json" + NOTARY_CLIENT = "{client} -c cmd/notary/config.json".format( + client=NOTARY_BINARY) + +DEBUG = " -D" if os.getenv('DEBUG') else "" # ---- setup ---- + def download_docker(download_dir="/tmp"): """ Downloads the relevant docker binaries and sets the docker values @@ -92,9 +116,13 @@ def download_docker(download_dir="/tmp"): if not os.path.isfile(tarfilename): url = urljoin( - # as of 1.10 docker downloads are tar-ed due to potentially containing containerd etc. - # note that for windows (which we don't currently support), it's a .zip file - domain, "/".join(["builds", system, architecture, binary+".tgz"])) + # as of 1.10 docker downloads are tar-ed due to potentially + # containing containerd etc. 
+ # note that for windows (which we don't currently support), + # it's a .zip file + domain, "/".join( + ["builds", system, architecture, binary+".tgz"])) + print("Downloading", url) downloadfile.retrieve(url, tarfilename) @@ -110,7 +138,27 @@ def download_docker(download_dir="/tmp"): os.chmod(fname, 0755) if not os.path.isfile(DOCKERS[version]): - raise Exception("Extracted {0} to {1} but could not find {1}".format(tarfilename, extractdir, filename)) + raise Exception( + "Extracted {tar} to {loc} but could not find {docker}".format( + tar=tarfilename, loc=extractdir, docker=DOCKERS[version])) + + +def verify_notary(): + """ + Check that notary is the right version + """ + if not os.path.isfile(NOTARY_BINARY): + raise Exception("notary client does not exist: " + NOTARY_BINARY) + + output = subprocess.check_output([NOTARY_BINARY, "version"]).strip() + lines = output.split("\n") + if len(lines) != 3: + print(output) + raise Exception("notary version output invalid") + + if lines[1].split()[-1] > NOTARY_VERSION: + print(output) + raise Exception("notary version too high: must be <= " + NOTARY_VERSION) def setup(): @@ -118,12 +166,19 @@ def setup(): Ensure we are set up to run the test """ download_docker() + verify_notary() + # ensure that we have the alpine image + subprocess.call("docker pull alpine".split()) + # copy the docker config dir over so we don't break anything in real docker # config directory os.mkdir(_TEMP_DOCKER_CONFIG_DIR) + # copy any docker creds over so we can push configfile = os.path.join(_TEMP_DOCKER_CONFIG_DIR, "config.json") - shutil.copyfile(os.path.join(DEFAULT_DOCKER_CONFIG, "config.json"), configfile) + shutil.copyfile( + os.path.join(DEFAULT_DOCKER_CONFIG, "config.json"), configfile) + # always clean up the config file so creds aren't left in this temp directory atexit.register(os.remove, configfile) defaulttlsdir = os.path.join(DEFAULT_DOCKER_CONFIG, "tls") @@ -192,6 +247,7 @@ def clear_tuf(): if "No such file or directory" not in 
str(ex): raise + def clear_keys(): """ Removes the TUF keys in trust directory, since the key format changed @@ -205,27 +261,39 @@ def clear_keys(): raise -def run_cmd(cmd, fileoutput): +def run_cmd(cmd, fileoutput, input=None): """ Takes a string command, runs it, and returns the output even if it fails. """ print("$ " + cmd) - fileoutput.write("$ {0}\n".format(cmd)) - try: - output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT, - env=_ENV) - except subprocess.CalledProcessError as ex: - print(ex.output) - fileoutput.write(ex.output) - raise + fileoutput.write("$ {cmd}\n".format(cmd=cmd)) + + if input is not None: + process = subprocess.Popen( + cmd.split(), env=_ENV, stderr=subprocess.STDOUT, + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + + process.stdin.write(input) + process.stdin.close() else: - if output: - print(output) - fileoutput.write(output) - return output - finally: - print() - fileoutput.write("\n") + process = subprocess.Popen(cmd.split(), env=_ENV, stderr=subprocess.STDOUT, + stdout=subprocess.PIPE) + output = "" + while process.poll() is None: + line = process.stdout.readline() + print(line.strip("\n")) + fileoutput.write(line) + if "level=debug" not in line: + output += line + + retcode = process.poll() + print() + fileoutput.write("\n") + + if retcode: + raise subprocess.CalledProcessError(retcode, cmd, output=output) + + return output def rmi(fout, docker_version, image, tag): @@ -234,12 +302,14 @@ def rmi(fout, docker_version, image, tag): """ try: run_cmd( - "{0} rmi {1}:{2}".format(DOCKERS[docker_version], image, tag), + "{docker} rmi {image}:{tag}".format( + docker=DOCKERS[docker_version], image=image, tag=tag), fout) except subprocess.CalledProcessError as ex: if "could not find image" not in str(ex): raise + def assert_equality(actual, expected): """ Assert equality, print nice message @@ -255,9 +325,10 @@ def pull(fout, docker_version, image, tag, expected_sha): """ clear_tuf() rmi(fout, docker_version, image, tag) 
- output = run_cmd("{0} pull {1}:{2}".format(DOCKERS[docker_version], - image, tag), - fout) + output = run_cmd( + "{docker}{debug} pull {image}:{tag}".format( + docker=DOCKERS[docker_version], image=image, tag=tag, debug=DEBUG), + fout) sha = _DIGEST_REGEX.search(output).group(1) assert_equality(sha, expected_sha) @@ -271,13 +342,15 @@ def push(fout, docker_version, image, tag): # tag image with the docker version run_cmd( - "{0} tag alpine {1}:{2}".format(DOCKERS[docker_version], image, tag), + "{docker} tag alpine {image}:{tag}".format( + docker=DOCKERS[docker_version], image=image, tag=tag), fout) # push! - output = run_cmd("{0} push {1}:{2}".format(DOCKERS[docker_version], - image, tag), - fout) + output = run_cmd( + "{docker}{debug} push {image}:{tag}".format( + docker=DOCKERS[docker_version], image=image, tag=tag, debug=DEBUG), + fout) sha = _DIGEST_REGEX.search(output).group(1) size = _SIZE_REGEX.search(output).group(1) @@ -292,6 +365,18 @@ def push(fout, docker_version, image, tag): return sha, size +def get_notary_usernamepass(): + """ + Gets the username password for the notary server + """ + username = os.getenv("NOTARY_SERVER_USERNAME") + passwd = os.getenv("NOTARY_SERVER_PASSPHRASE") + + if username and passwd: + return username + "\n" + passwd + "\n" + return None + + def notary_list(fout, repo): """ Calls notary list on the repo and returns a list of lists of tags, shas, @@ -299,7 +384,9 @@ def notary_list(fout, repo): """ clear_tuf() output = run_cmd( - "{0} -d {1} list {2}".format(NOTARY_CLIENT, _TRUST_DIR, repo), fout) + "{notary}{debug} -d {trustdir} list {gun}".format( + notary=NOTARY_CLIENT, trustdir=_TRUST_DIR, gun=repo, debug=DEBUG), + fout, input=get_notary_usernamepass()) lines = output.strip().split("\n") assert len(lines) >= 3, "not enough targets" return [line.strip().split() for line in lines[2:]] @@ -312,13 +399,16 @@ def test_build(fout, image, docker_version): clear_tuf() # build # simple dockerfile to test building with trust - 
dockerfile = "FROM {0}:{1}\nRUN sh\n".format(image, docker_version) + dockerfile = "FROM {image}:{tag}\nRUN sh\n".format( + image=image, tag=docker_version) tempdir_dockerfile = os.path.join(_TEMPDIR, "Dockerfile") with open(tempdir_dockerfile, 'wb') as ftemp: - ftemp.write(dockerfile) + ftemp.write(dockerfile) output = run_cmd( - "{0} build {1}".format(DOCKERS[docker_version], _TEMPDIR), fout) + "{docker}{debug} build {context}".format( + docker=DOCKERS[docker_version], context=_TEMPDIR, debug=DEBUG), + fout) build_result = _BUILD_REGEX.findall(output) assert len(build_result) >= 0, "build did not succeed" @@ -335,7 +425,8 @@ def test_pull_a(fout, docker_version, image, expected_tags): # pull -a output = run_cmd( - "{0} pull -a {1}".format(DOCKERS[docker_version], image), fout) + "{docker}{debug} pull -a {image}".format( + docker=DOCKERS[docker_version], image=image, debug=DEBUG), fout) pulled_tags = _PULL_A_REGEX.findall(output) assert_equality(len(pulled_tags), len(expected_tags)) @@ -395,7 +486,9 @@ def test_run(fout, image, docker_version): clear_tuf() # run output = run_cmd( - "{0} run -it --rm {1}:{2} echo SUCCESS".format(DOCKERS[docker_version], image, docker_version), fout) + "{docker}{debug} run -it --rm {image}:{tag} echo SUCCESS".format( + docker=DOCKERS[docker_version], image=image, tag=docker_version, + debug=DEBUG), fout) assert "SUCCESS" in output, "run did not succeed" @@ -491,12 +584,13 @@ def rotate_to_server_snapshot(fout, image): Uses the notary client to rotate the snapshot key to be server-managed. 
""" run_cmd( - "{0} -d {1} key rotate {2} snapshot -r".format( - NOTARY_CLIENT, _TRUST_DIR, image), - fout) + "{notary}{debug} -d {trustdir} key rotate {gun} snapshot -r".format( + notary=NOTARY_CLIENT, trustdir=_TRUST_DIR, gun=image, debug=DEBUG), + fout, input=get_notary_usernamepass()) run_cmd( - "{0} -d {1} publish {2}".format(NOTARY_CLIENT, _TRUST_DIR, image), - fout) + "{notary}{debug} -d {trustdir} publish {gun}".format( + notary=NOTARY_CLIENT, trustdir=_TRUST_DIR, gun=image, debug=DEBUG), + fout, input=get_notary_usernamepass()) def test_all_docker_versions(): diff --git a/client/witness.go b/client/witness.go index 6600b60fc..21a42aac4 100644 --- a/client/witness.go +++ b/client/witness.go @@ -1,10 +1,11 @@ package client import ( + "path/filepath" + "github.com/docker/notary/client/changelist" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" - "path/filepath" ) // Witness creates change objects to witness (i.e. re-sign) the given @@ -41,7 +42,18 @@ func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role string) error { r.Dirty = true return nil } - if invalid != nil { + + if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil { + // A role with a threshold > len(keys) is technically invalid, but we let it build in the builder because + // we want to be able to download the role (which may still have targets on it), add more keys, and then + // witness the role, thus bringing it back to valid. However, if no keys have been added before witnessing, + // then it is still an invalid role, and can't be witnessed because nothing can bring it back to valid. 
+ if roleObj.Threshold > len(roleObj.Keys) { + return data.ErrInvalidRole{ + Role: role, + Reason: "role does not specify enough valid signing keys to meet its required threshold", + } + } if r, ok := invalid.Targets[role]; ok { // role is recognized but invalid, move to valid data and mark for re-signing repo.Targets[role] = r diff --git a/cmd/notary-server/config.go b/cmd/notary-server/config.go index 91eb6c0c6..daff08550 100644 --- a/cmd/notary-server/config.go +++ b/cmd/notary-server/config.go @@ -3,12 +3,9 @@ package main import ( "crypto/tls" "fmt" - "os" - "os/signal" "path" "strconv" "strings" - "syscall" "time" "github.com/Sirupsen/logrus" @@ -293,31 +290,3 @@ func parseServerConfig(configFilePath string, hRegister healthRegister, doBootst ConsistentCacheControlConfig: consistentCache, }, nil } - -func setupSignalTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, notary.NotarySupportedSignals...) - go func() { - for { - signalHandle(<-c) - } - }() -} - -// signalHandle will increase/decrease the logging level via the signal we get. 
-func signalHandle(sig os.Signal) { - switch sig { - case syscall.SIGUSR1: - if err := utils.AdjustLogLevel(true); err != nil { - fmt.Printf("Attempt to increase log level failed, will remain at %s level, error: %s\n", logrus.GetLevel(), err) - return - } - case syscall.SIGUSR2: - if err := utils.AdjustLogLevel(false); err != nil { - fmt.Printf("Attempt to decrease log level failed, will remain at %s level, error: %s\n", logrus.GetLevel(), err) - return - } - } - - fmt.Println("Successfully setting log level to ", logrus.GetLevel()) -} diff --git a/cmd/notary-server/main.go b/cmd/notary-server/main.go index c2cdf56e7..232e486c7 100644 --- a/cmd/notary-server/main.go +++ b/cmd/notary-server/main.go @@ -7,10 +7,12 @@ import ( "net/http" _ "net/http/pprof" "os" + "os/signal" "github.com/Sirupsen/logrus" "github.com/docker/distribution/health" "github.com/docker/notary/server" + "github.com/docker/notary/utils" "github.com/docker/notary/version" ) @@ -61,7 +63,10 @@ func main() { logrus.Fatal(err.Error()) } - setupSignalTrap() + c := utils.SetupSignalTrap(utils.LogLevelSignalHandle) + if c != nil { + defer signal.Stop(c) + } if flagStorage.doBootstrap { err = bootstrap(ctx) diff --git a/cmd/notary-server/main_test.go b/cmd/notary-server/main_test.go index 14fbd69bc..4bcc0c3fb 100644 --- a/cmd/notary-server/main_test.go +++ b/cmd/notary-server/main_test.go @@ -6,14 +6,11 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" "reflect" "strings" - "syscall" "testing" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/health" "github.com/docker/notary" "github.com/docker/notary/server/storage" @@ -416,30 +413,3 @@ func TestSampleConfig(t *testing.T) { // once for the DB, once for the trust service require.Equal(t, registerCalled, 2) } - -func TestSignalHandle(t *testing.T) { - tempdir, err := ioutil.TempDir("", "test-signal-handle") - require.NoError(t, err) - defer os.RemoveAll(tempdir) - f, err := os.Create(filepath.Join(tempdir, 
"testSignalHandle.json")) - require.NoError(t, err) - - f.WriteString(`{"logging": {"level": "info"}}`) - - v := viper.New() - utils.SetupViper(v, "envPrefix") - err = utils.ParseViper(v, f.Name()) - require.NoError(t, err) - - // Info + SIGUSR1 -> Debug - signalHandle(syscall.SIGUSR1) - require.Equal(t, logrus.GetLevel(), logrus.DebugLevel) - - // Debug + SIGUSR1 -> Debug - signalHandle(syscall.SIGUSR1) - require.Equal(t, logrus.GetLevel(), logrus.DebugLevel) - - // Debug + SIGUSR2-> Info - signalHandle(syscall.SIGUSR2) - require.Equal(t, logrus.GetLevel(), logrus.InfoLevel) -} diff --git a/cmd/notary-signer/main.go b/cmd/notary-signer/main.go index d15f124ae..d7459fc62 100644 --- a/cmd/notary-signer/main.go +++ b/cmd/notary-signer/main.go @@ -6,8 +6,10 @@ import ( "log" "net/http" "os" + "os/signal" "github.com/Sirupsen/logrus" + "github.com/docker/notary/utils" "github.com/docker/notary/version" _ "github.com/go-sql-driver/mysql" ) @@ -66,6 +68,11 @@ func main() { log.Println("RPC server listening on", signerConfig.GRPCAddr) } + c := utils.SetupSignalTrap(utils.LogLevelSignalHandle) + if c != nil { + defer signal.Stop(c) + } + grpcServer.Serve(lis) } diff --git a/cmd/notary/integration_test.go b/cmd/notary/integration_test.go index de9320292..ebb00ce41 100644 --- a/cmd/notary/integration_test.go +++ b/cmd/notary/integration_test.go @@ -1471,6 +1471,8 @@ func TestPurgeSingleKey(t *testing.T) { // 11. witness an invalid role and check for error on publish // 12. check non-targets base roles all fail // 13. test auto-publish functionality +// 14. remove all keys from the delegation and publish +// 15. 
witnessing the delegation should now fail func TestWitness(t *testing.T) { setUp(t) @@ -1637,6 +1639,12 @@ func TestWitness(t *testing.T) { require.NoError(t, err) require.Contains(t, output, targetName) require.Contains(t, output, targetHash) + + _, err = runCommand(t, tempDir, "-s", server.URL, "delegation", "remove", "-p", "gun", delgName, keyID, keyID2) + require.NoError(t, err) + _, err = runCommand(t, tempDir, "-s", server.URL, "witness", "-p", "gun", delgName) + require.Error(t, err) + require.Contains(t, err.Error(), "role does not specify enough valid signing keys to meet its required threshold") } func generateCertPrivKeyPair(t *testing.T, gun, keyAlgorithm string) (*x509.Certificate, data.PrivateKey, string) { diff --git a/cmd/notary/keys.go b/cmd/notary/keys.go index fea560d76..b6028293d 100644 --- a/cmd/notary/keys.go +++ b/cmd/notary/keys.go @@ -240,7 +240,11 @@ func (k *keyCommander) keysRotate(cmd *cobra.Command, args []string) error { } } - return nRepo.RotateKey(rotateKeyRole, k.rotateKeyServerManaged) + if err := nRepo.RotateKey(rotateKeyRole, k.rotateKeyServerManaged); err != nil { + return err + } + cmd.Printf("Successfully rotated %s key for repository %s\n", rotateKeyRole, gun) + return nil } func removeKeyInteractively(keyStores []trustmanager.KeyStore, keyID string, diff --git a/cmd/notary/main_test.go b/cmd/notary/main_test.go index 5b9f916e7..3d8541ec4 100644 --- a/cmd/notary/main_test.go +++ b/cmd/notary/main_test.go @@ -8,6 +8,7 @@ import ( "net/http/httptest" "os" "path/filepath" + "strconv" "strings" "testing" "time" @@ -343,10 +344,10 @@ func TestConfigFileTLSCanBeRelativeToConfigOrAbsolute(t *testing.T) { "remote_server": { "url": "%s", "root_ca": "root-ca.crt", - "tls_client_cert": "%s", + "tls_client_cert": %s, "tls_client_key": "notary-server.key" } - }`, s.URL, filepath.Join(tempDir, "notary-server.crt")) + }`, s.URL, strconv.Quote(filepath.Join(tempDir, "notary-server.crt"))) configFile.Close() // copy the certs to be relative 
to the config directory diff --git a/cmd/notary/tuf.go b/cmd/notary/tuf.go index e9dba68ed..e0ae6cc0c 100644 --- a/cmd/notary/tuf.go +++ b/cmd/notary/tuf.go @@ -13,6 +13,8 @@ import ( "strings" "time" + "golang.org/x/crypto/ssh/terminal" + "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" @@ -348,11 +350,13 @@ func (t *tufCommander) tufDeleteGUN(cmd *cobra.Command, args []string) error { // Only initialize a roundtripper if we get the remote flag var rt http.RoundTripper + var remoteDeleteInfo string if t.deleteRemote { rt, err = getTransport(config, gun, admin) if err != nil { return err } + remoteDeleteInfo = " and remote" } nRepo, err := notaryclient.NewNotaryRepository( @@ -362,9 +366,13 @@ func (t *tufCommander) tufDeleteGUN(cmd *cobra.Command, args []string) error { return err } - cmd.Printf("Deleting trust data for repository %s.\n", gun) + cmd.Printf("Deleting trust data for repository %s\n", gun) - return nRepo.DeleteTrustData(t.deleteRemote) + if err := nRepo.DeleteTrustData(t.deleteRemote); err != nil { + return err + } + cmd.Printf("Successfully deleted local%s trust data for repository %s\n", remoteDeleteInfo, gun) + return nil } func (t *tufCommander) tufInit(cmd *cobra.Command, args []string) error { @@ -619,9 +627,15 @@ func (t *tufCommander) tufReset(cmd *cobra.Command, args []string) error { } if t.resetAll { - return cl.Clear(t.archiveChangelist) + err = cl.Clear(t.archiveChangelist) + } else { + err = cl.Remove(t.deleteIdx) } - return cl.Remove(t.deleteIdx) + // If it was a success, print to terminal + if err == nil { + cmd.Printf("Successfully reset specified changes for repository %s\n", gun) + } + return err } func (t *tufCommander) tufPublish(cmd *cobra.Command, args []string) error { @@ -654,10 +668,7 @@ func (t *tufCommander) tufPublish(cmd *cobra.Command, args []string) error { return err } - if err = nRepo.Publish(); err != nil { - return err - } 
- return nil + return publishAndPrintToCLI(cmd, nRepo, gun) } func (t *tufCommander) tufRemove(cmd *cobra.Command, args []string) error { @@ -746,7 +757,8 @@ type passwordStore struct { } func (ps passwordStore) Basic(u *url.URL) (string, string) { - if ps.anonymous { + // if it's not a terminal, don't wait on input + if ps.anonymous || !terminal.IsTerminal(int(os.Stdin.Fd())) { return "", "" } @@ -786,6 +798,15 @@ func (ps passwordStore) Basic(u *url.URL) (string, string) { return username, password } +// to comply with the CredentialStore interface +func (ps passwordStore) RefreshToken(u *url.URL, service string) string { + return "" +} + +// to comply with the CredentialStore interface +func (ps passwordStore) SetRefreshToken(u *url.URL, service string, token string) { +} + type httpAccess int const ( @@ -1005,5 +1026,13 @@ func maybeAutoPublish(cmd *cobra.Command, doPublish bool, gun string, config *vi } cmd.Println("Auto-publishing changes to", gun) - return nRepo.Publish() + return publishAndPrintToCLI(cmd, nRepo, gun) +} + +func publishAndPrintToCLI(cmd *cobra.Command, nRepo *notaryclient.NotaryRepository, gun string) error { + if err := nRepo.Publish(); err != nil { + return err + } + cmd.Printf("Successfully published changes for repository %s\n", gun) + return nil } diff --git a/cmd/notary/tuf_test.go b/cmd/notary/tuf_test.go index 913f80529..de3edba07 100644 --- a/cmd/notary/tuf_test.go +++ b/cmd/notary/tuf_test.go @@ -3,10 +3,12 @@ package main import ( "net/http" "net/http/httptest" + "net/url" "os" "path/filepath" "testing" + "github.com/docker/distribution/registry/client/auth" "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/stretchr/testify/require" @@ -219,3 +221,18 @@ func TestGetTrustPinningErrors(t *testing.T) { tc.sha256 = "88b76b34ab83a9e4d5abe3697950fb73f940aab1aa5b534f80cf9de9708942be" require.Error(t, tc.tufAddByHash(&cobra.Command{}, []string{"gun", "test1", "100"})) } + +func TestPasswordStore(t *testing.T) { + myurl, err := 
url.Parse("https://docker.io") + require.NoError(t, err) + + // whether or not we're anonymous, because this isn't a terminal, + for _, ps := range []auth.CredentialStore{passwordStore{}, passwordStore{anonymous: true}} { + username, passwd := ps.Basic(myurl) + require.Equal(t, "", username) + require.Equal(t, "", passwd) + + ps.SetRefreshToken(myurl, "someService", "token") // doesn't return an error, just want to make sure no state changes + require.Equal(t, "", ps.RefreshToken(myurl, "someService")) + } +} diff --git a/const.go b/const.go index 19752072b..0c4d4037b 100644 --- a/const.go +++ b/const.go @@ -1,10 +1,6 @@ package notary -import ( - "os" - "syscall" - "time" -) +import "time" // application wide constants const ( @@ -72,11 +68,3 @@ var NotaryDefaultExpiries = map[string]time.Duration{ "snapshot": NotarySnapshotExpiry, "timestamp": NotaryTimestampExpiry, } - -// NotarySupportedSignals contains the signals we would like to capture: -// - SIGUSR1, indicates a increment of the log level. -// - SIGUSR2, indicates a decrement of the log level. -var NotarySupportedSignals = []os.Signal{ - syscall.SIGUSR1, - syscall.SIGUSR2, -} diff --git a/const_nowindows.go b/const_nowindows.go new file mode 100644 index 000000000..67551717a --- /dev/null +++ b/const_nowindows.go @@ -0,0 +1,16 @@ +// +build !windows + +package notary + +import ( + "os" + "syscall" +) + +// NotarySupportedSignals contains the signals we would like to capture: +// - SIGUSR1, indicates a increment of the log level. +// - SIGUSR2, indicates a decrement of the log level. 
+var NotarySupportedSignals = []os.Signal{ + syscall.SIGUSR1, + syscall.SIGUSR2, +} diff --git a/const_windows.go b/const_windows.go new file mode 100644 index 000000000..e2dff0e4b --- /dev/null +++ b/const_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package notary + +import "os" + +// NotarySupportedSignals does not contain any signals, because SIGUSR1/2 are not supported on windows +var NotarySupportedSignals = []os.Signal{} diff --git a/docs/reference/server-config.md b/docs/reference/server-config.md index 2143bbe5f..d1cab23b9 100644 --- a/docs/reference/server-config.md +++ b/docs/reference/server-config.md @@ -362,10 +362,12 @@ Example: ## Hot logging level reload -We don't support completely reloading notary configuration files yet at present. What we support for now is: +We don't support completely reloading notary configuration files yet at present. What we support for Linux and OSX now is: - increase logging level by signaling `SIGUSR1` - decrease logging level by signaling `SIGUSR2` +No signals and no dynamic logging level changes are supported for Windows yet. + Example: To increase logging level diff --git a/docs/reference/signer-config.md b/docs/reference/signer-config.md index 7ced28e17..f5c35e0ae 100644 --- a/docs/reference/signer-config.md +++ b/docs/reference/signer-config.md @@ -210,6 +210,44 @@ The environment variables for the older passwords are optional, but Notary Signer will not be able to decrypt older keys if they are not provided, and attempts to sign data using those keys will fail. +## Hot logging level reload +We don't support completely reloading notary signer configuration files yet at present. What we support for Linux and OSX now is: +- increase logging level by signaling `SIGUSR1` +- decrease logging level by signaling `SIGUSR2` + +No signals and no dynamic logging level changes are supported for Windows yet. 
+ +Example: + +To increase logging level +``` +$ kill -s SIGUSR1 PID + +or + +$ docker exec -i CONTAINER_ID kill -s SIGUSR1 PID +``` + +To decrease logging level +``` +$ kill -s SIGUSR2 PID + +or + +$ docker exec -i CONTAINER_ID kill -s SIGUSR2 PID +``` +PID is the process id of `notary-signer` and it may not the PID 1 process if you are running +the container with some kind of wrapper startup script or something. + +You can get the PID of `notary-signer` through +``` +$ docker exec CONTAINER_ID ps aux + +or + +$ ps aux | grep "notary-signer -config" | grep -v "grep" +``` + ## Related information diff --git a/trustmanager/keystore.go b/trustmanager/keystore.go index 624949efa..03d9d9687 100644 --- a/trustmanager/keystore.go +++ b/trustmanager/keystore.go @@ -173,26 +173,12 @@ func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) { s.Lock() defer s.Unlock() - // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds - if keyInfo, ok := s.keyInfoMap[name]; ok { - name = filepath.Join(keyInfo.Gun, name) - } - cachedKeyEntry, ok := s.cachedKeys[name] if ok { return cachedKeyEntry.key, cachedKeyEntry.alias, nil } - keyAlias, legacy, err := getKeyRole(s.store, name) - if err != nil { - return nil, "", err - } - - if legacy { - name = name + "_" + keyAlias - } - - keyBytes, err := s.store.Get(filepath.Join(getSubdir(keyAlias), name)) + keyBytes, _, keyAlias, err := getKey(s.store, name) if err != nil { return nil, "", err } @@ -218,25 +204,18 @@ func (s *GenericKeyStore) ListKeys() map[string]KeyInfo { func (s *GenericKeyStore) RemoveKey(keyID string) error { s.Lock() defer s.Unlock() - // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds - if keyInfo, ok := s.keyInfoMap[keyID]; ok { - keyID = filepath.Join(keyInfo.Gun, keyID) - } - role, legacy, err := getKeyRole(s.store, keyID) - if err != 
nil { + _, filename, _, err := getKey(s.store, keyID) + switch err.(type) { + case ErrKeyNotFound, nil: + break + default: return err } delete(s.cachedKeys, keyID) - name := keyID - if legacy { - name = keyID + "_" + role - } - - // being in a subdirectory is for backwards compatibliity - err = s.store.Remove(filepath.Join(getSubdir(role), name)) + err = s.store.Remove(filename) // removing a file that doesn't exist doesn't fail if err != nil { return err } @@ -276,11 +255,11 @@ func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) { return keyID, KeyInfo{Gun: gun, Role: role}, nil } -// getKeyRole finds the role for the given keyID. It attempts to look -// both in the newer format PEM headers, and also in the legacy filename -// format. It returns: the role, whether it was found in the legacy format -// (true == legacy), and an error -func getKeyRole(s Storage, keyID string) (string, bool, error) { +// getKey finds the key and role for the given keyID. It attempts to +// look both in the newer format PEM headers, and also in the legacy filename +// format. 
It returns: the key bytes, the filename it was found under, the role, +// and an error +func getKey(s Storage, keyID string) ([]byte, string, string, error) { name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) for _, file := range s.ListFiles() { @@ -289,21 +268,21 @@ func getKeyRole(s Storage, keyID string) (string, bool, error) { if strings.HasPrefix(filename, name) { d, err := s.Get(file) if err != nil { - return "", false, err + return nil, "", "", err } block, _ := pem.Decode(d) if block != nil { if role, ok := block.Headers["role"]; ok { - return role, false, nil + return d, file, role, nil } } role := strings.TrimPrefix(filename, name+"_") - return role, true, nil + return d, file, role, nil } } - return "", false, ErrKeyNotFound{KeyID: keyID} + return nil, "", "", ErrKeyNotFound{KeyID: keyID} } // Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys diff --git a/trustmanager/keystore_test.go b/trustmanager/keystore_test.go index 12ecb2297..5fd6a8937 100644 --- a/trustmanager/keystore_test.go +++ b/trustmanager/keystore_test.go @@ -2,10 +2,12 @@ package trustmanager import ( "crypto/rand" + "encoding/pem" "errors" "fmt" "io/ioutil" "os" + "path" "path/filepath" "testing" @@ -167,29 +169,39 @@ func TestGet(t *testing.T) { gun := "docker.io/notary" - // Root role needs to go in the rootKeySubdir to be read. - // All other roles can go in the nonRootKeysSubdir, possibly under a GUN + // Root role currently goes into the rootKeySubdir, and all other roles go + // in the nonRootKeysSubdir, possibly under a GUN. 
nonRootKeysSubdirWithGUN := filepath.Clean(filepath.Join(notary.NonRootKeysSubdir, gun)) - - testGetKeyWithRole(t, "", data.CanonicalRootRole, notary.RootKeysSubdir, true) + testGetKeyWithRole(t, "", data.CanonicalRootRole, filepath.Join(notary.PrivDir, notary.RootKeysSubdir), true) for _, role := range nonRootRolesToTest { - testGetKeyWithRole(t, "", role, notary.NonRootKeysSubdir, true) - testGetKeyWithRole(t, gun, role, nonRootKeysSubdirWithGUN, true) + testGetKeyWithRole(t, "", role, filepath.Join(notary.PrivDir, notary.NonRootKeysSubdir), true) + testGetKeyWithRole(t, gun, role, filepath.Join(notary.PrivDir, nonRootKeysSubdirWithGUN), true) } - // Root cannot go in the nonRootKeysSubdir, or it won't be able to be read, - // and vice versa - testGetKeyWithRole(t, "", data.CanonicalRootRole, notary.NonRootKeysSubdir, false) - testGetKeyWithRole(t, gun, data.CanonicalRootRole, nonRootKeysSubdirWithGUN, false) - for _, role := range nonRootRolesToTest { - testGetKeyWithRole(t, "", role, notary.RootKeysSubdir, false) + // However, keys of any role can be read from anywhere in the private dir so long as + // it has the right ID + for _, expectedSubdir := range []string{ + notary.PrivDir, + filepath.Join(notary.PrivDir, nonRootKeysSubdirWithGUN), + filepath.Join(notary.PrivDir, notary.RootKeysSubdir), + filepath.Join(notary.PrivDir, notary.RootKeysSubdir, gun), + filepath.Join(notary.PrivDir, notary.NonRootKeysSubdir), + } { + for _, role := range append(nonRootRolesToTest, data.CanonicalRootRole) { + testGetKeyWithRole(t, "", role, expectedSubdir, true) + testGetKeyWithRole(t, gun, role, expectedSubdir, true) + } } -} -func testGetKeyWithRole(t *testing.T, gun, role, expectedSubdir string, success bool) { - testData := []byte(fmt.Sprintf(`-----BEGIN RSA PRIVATE KEY----- -role: %s + // keys outside of the private dir cannot be read + for _, role := range append(nonRootRolesToTest, data.CanonicalRootRole) { + testGetKeyWithRole(t, "", role, "otherDir", false) + 
testGetKeyWithRole(t, gun, role, "otherDir", false) + } +} +func writeKeyFile(t *testing.T, perms os.FileMode, filename, roleInPEM string) []byte { + testData := []byte(`-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAyUIXjsrWRrvPa4Bzp3VJ6uOUGPay2fUpSV8XzNxZxIG/Opdr +k3EQi1im6WOqF3Y5AS1UjYRxNuRN+cAZeo3uS1pOTuoSupBXuchVw8s4hZJ5vXn TRmGb+xY7tZ1ZVgPfAZDib9sRSUsL/gC+aSyprAjG/YBdbF06qKbfOfsoCEYW1OQ @@ -216,7 +228,24 @@ EkqpAoGAJWe8PC0XK2RE9VkbSPg9Ehr939mOLWiHGYTVWPttUcum/rTKu73/X/mj WxnPWGtzM1pHWypSokW90SP4/xedMxludvBvmz+CTYkNJcBGCrJumy11qJhii9xp EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= -----END RSA PRIVATE KEY----- -`, role)) +`) + + if roleInPEM != "" { + block, _ := pem.Decode(testData) + require.NotNil(t, block) + block.Headers = map[string]string{ + "role": roleInPEM, + } + testData = pem.EncodeToMemory(block) + } + + os.MkdirAll(filepath.Dir(filename), perms) + err := ioutil.WriteFile(filename, testData, perms) + require.NoError(t, err, "failed to write test file") + return testData +} + +func testGetKeyWithRole(t *testing.T, gun, role, expectedSubdir string, success bool) { testName := "keyID" testExt := "key" perms := os.FileMode(0755) @@ -229,10 +258,8 @@ EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' 
- filePath := filepath.Join(tempBaseDir, notary.PrivDir, expectedSubdir, testName+"."+testExt) - os.MkdirAll(filepath.Dir(filePath), perms) - err = ioutil.WriteFile(filePath, testData, perms) - require.NoError(t, err, "failed to write test file") + filePath := filepath.Join(tempBaseDir, expectedSubdir, testName+"."+testExt) + testData := writeKeyFile(t, perms, filePath, role) // Create our store store, err := NewKeyFileStore(tempBaseDir, emptyPassphraseRetriever) @@ -240,7 +267,7 @@ EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= // Call the GetKey function if gun != "" { - testName = gun + "/keyID" + testName = path.Join(gun, "keyID") } privKey, _, err := store.GetKey(testName) if success { @@ -263,34 +290,6 @@ EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= // TestGetLegacyKey ensures we can still load keys where the role // is stored as part of the filename (i.e. _.key func TestGetLegacyKey(t *testing.T) { - testData := []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAyUIXjsrWRrvPa4Bzp3VJ6uOUGPay2fUpSV8XzNxZxIG/Opdr -+k3EQi1im6WOqF3Y5AS1UjYRxNuRN+cAZeo3uS1pOTuoSupBXuchVw8s4hZJ5vXn -TRmGb+xY7tZ1ZVgPfAZDib9sRSUsL/gC+aSyprAjG/YBdbF06qKbfOfsoCEYW1OQ -82JqHzQH514RFYPTnEGpvfxWaqmFQLmv0uMxV/cAYvqtrGkXuP0+a8PknlD2obw5 -0rHE56Su1c3Q42S7L51K38tpbgWOSRcTfDUWEj5v9wokkNQvyKBwbS996s4EJaZd -7r6M0h1pHnuRxcSaZLYRwgOe1VNGg2VfWzgd5QIDAQABAoIBAF9LGwpygmj1jm3R -YXGd+ITugvYbAW5wRb9G9mb6wspnwNsGTYsz/UR0ZudZyaVw4jx8+jnV/i3e5PC6 -QRcAgqf8l4EQ/UuThaZg/AlT1yWp9g4UyxNXja87EpTsGKQGwTYxZRM4/xPyWOzR -mt8Hm8uPROB9aA2JG9npaoQG8KSUj25G2Qot3ukw/IOtqwN/Sx1EqF0EfCH1K4KU -a5TrqlYDFmHbqT1zTRec/BTtVXNsg8xmF94U1HpWf3Lpg0BPYT7JiN2DPoLelRDy -a/A+a3ZMRNISL5wbq/jyALLOOyOkIqa+KEOeW3USuePd6RhDMzMm/0ocp5FCwYfo -k4DDeaECgYEA0eSMD1dPGo+u8UTD8i7ZsZCS5lmXLNuuAg5f5B/FGghD8ymPROIb -dnJL5QSbUpmBsYJ+nnO8RiLrICGBe7BehOitCKi/iiZKJO6edrfNKzhf4XlU0HFl -jAOMa975pHjeCoZ1cXJOEO9oW4SWTCyBDBSqH3/ZMgIOiIEk896lSmkCgYEA9Xf5 -Jqv3HtQVvjugV/axAh9aI8LMjlfFr9SK7iXpY53UdcylOSWKrrDok3UnrSEykjm7 
-UL3eCU5jwtkVnEXesNn6DdYo3r43E6iAiph7IBkB5dh0yv3vhIXPgYqyTnpdz4pg -3yPGBHMPnJUBThg1qM7k6a2BKHWySxEgC1DTMB0CgYAGvdmF0J8Y0k6jLzs/9yNE -4cjmHzCM3016gW2xDRgumt9b2xTf+Ic7SbaIV5qJj6arxe49NqhwdESrFohrKaIP -kM2l/o2QaWRuRT/Pvl2Xqsrhmh0QSOQjGCYVfOb10nAHVIRHLY22W4o1jk+piLBo -a+1+74NRaOGAnu1J6/fRKQKBgAF180+dmlzemjqFlFCxsR/4G8s2r4zxTMXdF+6O -3zKuj8MbsqgCZy7e8qNeARxwpCJmoYy7dITNqJ5SOGSzrb2Trn9ClP+uVhmR2SH6 -AlGQlIhPn3JNzI0XVsLIloMNC13ezvDE/7qrDJ677EQQtNEKWiZh1/DrsmHr+irX -EkqpAoGAJWe8PC0XK2RE9VkbSPg9Ehr939mOLWiHGYTVWPttUcum/rTKu73/X/mj -WxnPWGtzM1pHWypSokW90SP4/xedMxludvBvmz+CTYkNJcBGCrJumy11qJhii9xp -EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= ------END RSA PRIVATE KEY----- -`) testName := "docker.com/notary/root" testExt := "key" testAlias := "root" @@ -305,10 +304,7 @@ EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= // Since we're generating this manually we need to add the extension '.' filePath := filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir, testName+"_"+testAlias+"."+testExt) - - os.MkdirAll(filepath.Dir(filePath), perms) - err = ioutil.WriteFile(filePath, testData, perms) - require.NoError(t, err, "failed to write test file") + writeKeyFile(t, perms, filePath, "") // Create our store store, err := NewKeyFileStore(tempBaseDir, emptyPassphraseRetriever) @@ -356,12 +352,6 @@ func TestListKeys(t *testing.T) { require.Equal(t, role, listedInfo.Role) } - // Write an invalid filename to the directory - filePath := filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir, "fakekeyname.key") - err = ioutil.WriteFile(filePath, []byte("data"), perms) - require.NoError(t, err, "failed to write test file") - - // Check to see if the keystore still lists two keys keyMap := store.ListKeys() require.Len(t, keyMap, len(roles)) @@ -372,6 +362,31 @@ func TestListKeys(t *testing.T) { _, err = store.GetKeyInfo(keyID) require.NoError(t, err) } + + require.Len(t, store.ListKeys(), len(roles)) + + // ListKeys() works even if the keys are in non-standard 
locations + for i, loc := range []string{ + filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir), + filepath.Join(tempBaseDir, notary.PrivDir, notary.NonRootKeysSubdir), + filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir, testName), + filepath.Join(tempBaseDir, notary.PrivDir, testName), + filepath.Join(tempBaseDir, notary.PrivDir), + tempBaseDir, // this key won't be read + } { + fp := filepath.Join(loc, fmt.Sprintf("keyID%d.key", i)) + writeKeyFile(t, perms, fp, "") + + // Ensure file exists + _, err := ioutil.ReadFile(fp) + require.NoError(t, err, "expected file not found") + } + + // update our store so we read from the FS again + store, err = NewKeyFileStore(tempBaseDir, passphraseRetriever) + require.NoError(t, err, "failed to create new key filestore") + + require.Len(t, store.ListKeys(), len(roles)+5) } func TestAddGetKeyMemStore(t *testing.T) { @@ -567,6 +582,42 @@ func TestRemoveKey(t *testing.T) { testRemoveKeyWithRole(t, data.CanonicalSnapshotRole, filepath.Join(notary.NonRootKeysSubdir, gun)) testRemoveKeyWithRole(t, "targets/a/b/c", notary.NonRootKeysSubdir) testRemoveKeyWithRole(t, "invalidRole", notary.NonRootKeysSubdir) + + // create another store for other testing + tempBaseDir, err := ioutil.TempDir("", "notary-test-") + require.NoError(t, err, "failed to create a temporary directory") + defer os.RemoveAll(tempBaseDir) + + store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) + require.NoError(t, err, "failed to create new key filestore") + + // write keys to non-standard locations - since we're generating keys manually + // we need to add the extenxion + perms := os.FileMode(0755) + for i, loc := range []string{ + filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir), + filepath.Join(tempBaseDir, notary.PrivDir, notary.NonRootKeysSubdir), + filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir, gun), + filepath.Join(tempBaseDir, notary.PrivDir, gun), + filepath.Join(tempBaseDir, 
notary.PrivDir), + } { + fp := filepath.Join(loc, fmt.Sprintf("keyID%d.key", i)) + writeKeyFile(t, perms, fp, "") + + // Ensure file exists + _, err := ioutil.ReadFile(fp) + require.NoError(t, err, "expected file not found") + + err = store.RemoveKey(fmt.Sprintf("keyID%d", i)) + require.NoError(t, err) + + // File should no longer exist + _, err = ioutil.ReadFile(fp) + require.True(t, os.IsNotExist(err), "file should not exist") + } + + // removing a non-existent key should not error + require.NoError(t, store.RemoveKey("nope")) } func testRemoveKeyWithRole(t *testing.T, role, expectedSubdir string) { @@ -599,9 +650,9 @@ func testRemoveKeyWithRole(t *testing.T, role, expectedSubdir string) { err = store.RemoveKey(privKey.ID()) require.NoError(t, err, "unable to remove key") - // Check to see if file still exists + // File should no longer exist _, err = ioutil.ReadFile(expectedFilePath) - require.Error(t, err, "file should not exist") + require.True(t, os.IsNotExist(err), "file should not exist") } func TestKeysAreCached(t *testing.T) { diff --git a/tuf/signed/errors.go b/tuf/signed/errors.go index 1d4d9de4b..2a633c864 100644 --- a/tuf/signed/errors.go +++ b/tuf/signed/errors.go @@ -14,12 +14,17 @@ type ErrInsufficientSignatures struct { } func (e ErrInsufficientSignatures) Error() string { - candidates := strings.Join(e.MissingKeyIDs, ", ") + candidates := "" + if len(e.MissingKeyIDs) > 0 { + candidates = fmt.Sprintf(" (%s)", strings.Join(e.MissingKeyIDs, ", ")) + } + if e.FoundKeys == 0 { - return fmt.Sprintf("signing keys not available, need %d keys out of: %s", e.NeededKeys, candidates) + return fmt.Sprintf("signing keys not available: need %d keys from %d possible keys%s", + e.NeededKeys, len(e.MissingKeyIDs), candidates) } - return fmt.Sprintf("not enough signing keys: got %d of %d needed keys, other candidates: %s", - e.FoundKeys, e.NeededKeys, candidates) + return fmt.Sprintf("not enough signing keys: found %d of %d needed keys - %d other possible keys%s", 
+ e.FoundKeys, e.NeededKeys, len(e.MissingKeyIDs), candidates) } // ErrExpired indicates a piece of metadata has expired diff --git a/tuf/signed/sign_test.go b/tuf/signed/sign_test.go index 482092ad3..5ede9c9ab 100644 --- a/tuf/signed/sign_test.go +++ b/tuf/signed/sign_test.go @@ -345,3 +345,16 @@ func TestSignFailingKeys(t *testing.T) { require.Error(t, err) require.IsType(t, FailingPrivateKeyErr{}, err) } + +// make sure we produce readable error messages +func TestErrInsufficientSignaturesMessaging(t *testing.T) { + require.Contains(t, + ErrInsufficientSignatures{NeededKeys: 2, MissingKeyIDs: []string{"ID1", "ID2"}}.Error(), + "need 2 keys from 2 possible keys (ID1, ID2)") + require.Contains(t, + ErrInsufficientSignatures{FoundKeys: 1, NeededKeys: 2, MissingKeyIDs: []string{"ID1", "ID2"}}.Error(), + "found 1 of 2 needed keys - 2 other possible keys (ID1, ID2)") + require.Contains(t, + ErrInsufficientSignatures{FoundKeys: 1, NeededKeys: 2, MissingKeyIDs: []string{}}.Error(), + "found 1 of 2 needed keys - 0 other possible keys") +} diff --git a/utils/configuration.go b/utils/configuration.go index f94b73a27..cc97810da 100644 --- a/utils/configuration.go +++ b/utils/configuration.go @@ -5,6 +5,8 @@ package utils import ( "crypto/tls" "fmt" + "os" + "os/signal" "path/filepath" "strings" @@ -244,3 +246,20 @@ func AdjustLogLevel(increment bool) error { logrus.SetLevel(lvl) return nil } + +// SetupSignalTrap is a utility to trap supported signals hand handle them (currently by increasing logging) +func SetupSignalTrap(handler func(os.Signal)) chan os.Signal { + if len(notary.NotarySupportedSignals) == 0 { + return nil + + } + c := make(chan os.Signal, 1) + signal.Notify(c, notary.NotarySupportedSignals...) 
+ go func() { + for { + handler(<-c) + } + }() + + return c +} diff --git a/utils/configuration_nowindows.go b/utils/configuration_nowindows.go new file mode 100644 index 000000000..dc586a669 --- /dev/null +++ b/utils/configuration_nowindows.go @@ -0,0 +1,29 @@ +// +build !windows + +package utils + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// LogLevelSignalHandle will increase/decrease the logging level via the signal we get. +func LogLevelSignalHandle(sig os.Signal) { + switch sig { + case syscall.SIGUSR1: + if err := AdjustLogLevel(true); err != nil { + fmt.Printf("Attempt to increase log level failed, will remain at %s level, error: %s\n", logrus.GetLevel(), err) + return + } + case syscall.SIGUSR2: + if err := AdjustLogLevel(false); err != nil { + fmt.Printf("Attempt to decrease log level failed, will remain at %s level, error: %s\n", logrus.GetLevel(), err) + return + } + } + + fmt.Println("Successfully setting log level to", logrus.GetLevel()) +} diff --git a/utils/configuration_nowindows_test.go b/utils/configuration_nowindows_test.go new file mode 100644 index 000000000..eb02e64d6 --- /dev/null +++ b/utils/configuration_nowindows_test.go @@ -0,0 +1,24 @@ +// +build !windows + +package utils + +import ( + "syscall" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestLogLevelSignalHandle(t *testing.T) { + signalOperation := map[bool]syscall.Signal{ + optIncrement: syscall.SIGUSR1, + optDecrement: syscall.SIGUSR2, + } + + for _, expt := range logLevelExpectations { + logrus.SetLevel(expt.startLevel) + LogLevelSignalHandle(signalOperation[expt.increment]) + require.Equal(t, expt.endLevel, logrus.GetLevel()) + } +} diff --git a/utils/configuration_test.go b/utils/configuration_test.go index bf0c5f71a..fb77c8542 100644 --- a/utils/configuration_test.go +++ b/utils/configuration_test.go @@ -6,8 +6,10 @@ import ( "fmt" "io/ioutil" "os" + "os/signal" "path/filepath" "reflect" + 
"syscall" "testing" "github.com/Sirupsen/logrus" @@ -490,73 +492,84 @@ func TestParseViperWithValidFile(t *testing.T) { require.Equal(t, "debug", v.GetString("logging.level")) } +type logLevelTests struct { + startLevel logrus.Level + endLevel logrus.Level + increment bool +} + +const ( + optIncrement = true + optDecrement = false +) + +var logLevelExpectations = []logLevelTests{ + // highest: Debug, lowest: Panic. Incrementing brings everything up one level, except debug which is max level + {startLevel: logrus.DebugLevel, increment: optIncrement, endLevel: logrus.DebugLevel}, + {startLevel: logrus.InfoLevel, increment: optIncrement, endLevel: logrus.DebugLevel}, + {startLevel: logrus.WarnLevel, increment: optIncrement, endLevel: logrus.InfoLevel}, + {startLevel: logrus.ErrorLevel, increment: optIncrement, endLevel: logrus.WarnLevel}, + {startLevel: logrus.FatalLevel, increment: optIncrement, endLevel: logrus.ErrorLevel}, + {startLevel: logrus.PanicLevel, increment: optIncrement, endLevel: logrus.FatalLevel}, + + // highest: Debug, lowest: Panic. 
Decrementing brings everything down one level, except panic which is min level + {startLevel: logrus.DebugLevel, increment: optDecrement, endLevel: logrus.InfoLevel}, + {startLevel: logrus.InfoLevel, increment: optDecrement, endLevel: logrus.WarnLevel}, + {startLevel: logrus.WarnLevel, increment: optDecrement, endLevel: logrus.ErrorLevel}, + {startLevel: logrus.ErrorLevel, increment: optDecrement, endLevel: logrus.FatalLevel}, + {startLevel: logrus.FatalLevel, increment: optDecrement, endLevel: logrus.PanicLevel}, + {startLevel: logrus.PanicLevel, increment: optDecrement, endLevel: logrus.PanicLevel}, +} + func TestAdjustLogLevel(t *testing.T) { + for _, expt := range logLevelExpectations { + logrus.SetLevel(expt.startLevel) + err := AdjustLogLevel(expt.increment) + + if expt.startLevel == expt.endLevel { + require.Error(t, err) // because if it didn't change, that means AdjustLogLevel failed + } else { + require.NoError(t, err) + } - // To indicate increment or decrement the logging level - optIncrement := true - optDecrement := false + require.Equal(t, expt.endLevel, logrus.GetLevel()) + } +} - // Debug is the highest level for now, so we expected a error here - logrus.SetLevel(logrus.DebugLevel) - err := AdjustLogLevel(optIncrement) - require.Error(t, err) - // Debug -> Info - logrus.SetLevel(logrus.DebugLevel) - err = AdjustLogLevel(optDecrement) - require.NoError(t, err) - require.Equal(t, logrus.InfoLevel, logrus.GetLevel()) +func testSetSignalTrap(t *testing.T) { + var signalsPassedOn map[string]struct{} - // Info -> Debug - logrus.SetLevel(logrus.InfoLevel) - err = AdjustLogLevel(optIncrement) - require.NoError(t, err) - require.Equal(t, logrus.DebugLevel, logrus.GetLevel()) - // Info -> Warn - logrus.SetLevel(logrus.InfoLevel) - err = AdjustLogLevel(optDecrement) - require.NoError(t, err) - require.Equal(t, logrus.WarnLevel, logrus.GetLevel()) + signalHandler := func(s os.Signal) { + signalsPassedOn := make(map[string]struct{}) + 
signalsPassedOn[s.String()] = struct{}{} + } + c := SetupSignalTrap(signalHandler) - // Warn -> Info - logrus.SetLevel(logrus.WarnLevel) - err = AdjustLogLevel(optIncrement) - require.NoError(t, err) - require.Equal(t, logrus.InfoLevel, logrus.GetLevel()) - // Warn -> Error - logrus.SetLevel(logrus.WarnLevel) - err = AdjustLogLevel(optDecrement) - require.NoError(t, err) - require.Equal(t, logrus.ErrorLevel, logrus.GetLevel()) + if len(notary.NotarySupportedSignals) == 0 { // currently, windows only + require.Nil(t, c) + } else { + require.NotNil(t, c) + defer signal.Stop(c) + } - // Error -> Warn - logrus.SetLevel(logrus.ErrorLevel) - err = AdjustLogLevel(optIncrement) - require.NoError(t, err) - require.Equal(t, logrus.WarnLevel, logrus.GetLevel()) - // Error -> Fatal - logrus.SetLevel(logrus.ErrorLevel) - err = AdjustLogLevel(optDecrement) - require.NoError(t, err) - require.Equal(t, logrus.FatalLevel, logrus.GetLevel()) + for _, s := range notary.NotarySupportedSignals { + syscallSignal, ok := s.(syscall.Signal) + require.True(t, ok) + require.NoError(t, syscall.Kill(syscall.Getpid(), syscallSignal)) + require.Len(t, signalsPassedOn, 0) + require.NotNil(t, signalsPassedOn[s.String()]) + } +} - // Fatal -> Error - logrus.SetLevel(logrus.FatalLevel) - err = AdjustLogLevel(optIncrement) - require.NoError(t, err) - require.Equal(t, logrus.ErrorLevel, logrus.GetLevel()) - // Fatal -> Panic - logrus.SetLevel(logrus.FatalLevel) - err = AdjustLogLevel(optDecrement) - require.NoError(t, err) - require.Equal(t, logrus.PanicLevel, logrus.GetLevel()) +// TODO: undo this extra indirection, needed for mocking notary.NotarySupportedSignals being empty, when we have +// a windows CI system running +func TestSetSignalTrap(t *testing.T) { + testSetSignalTrap(t) +} - // Panic -> Fatal - logrus.SetLevel(logrus.PanicLevel) - err = AdjustLogLevel(optIncrement) - require.NoError(t, err) - require.Equal(t, logrus.FatalLevel, logrus.GetLevel()) - // Panic is the lowest level for now, 
so we expected a error here - logrus.SetLevel(logrus.PanicLevel) - err = AdjustLogLevel(optDecrement) - require.Error(t, err) +func TestSetSignalTrapMockWindows(t *testing.T) { + old := notary.NotarySupportedSignals + notary.NotarySupportedSignals = nil + testSetSignalTrap(t) + notary.NotarySupportedSignals = old } diff --git a/utils/configuration_windows.go b/utils/configuration_windows.go new file mode 100644 index 000000000..bb43c7e3b --- /dev/null +++ b/utils/configuration_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package utils + +import "os" + +// LogLevelSignalHandle will do nothing, because we aren't currently supporting signal handling in windows +func LogLevelSignalHandle(sig os.Signal) { +} diff --git a/vendor/github.com/docker/distribution/.drone.yml b/vendor/github.com/docker/distribution/.drone.yml deleted file mode 100644 index d943e19ff..000000000 --- a/vendor/github.com/docker/distribution/.drone.yml +++ /dev/null @@ -1,38 +0,0 @@ -image: dmp42/go:stable - -script: - # To be spoofed back into the test image - - go get github.com/modocache/gover - - - go get -t ./... - - # Go fmt - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - # Go lint - - test -z "$(golint ./... | tee /dev/stderr)" - # Go vet - - go vet ./... - # Go test - - go test -v -race -cover ./... - # Helper to concatenate reports - - gover - # Send to coverall - - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} - - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" - # http://labix.org/gocheck - -notify: - email: - recipients: - - distribution@docker.com - - slack: - team: docker - channel: "#dt" - username: mom - token: {{SLACK_TOKEN}} - on_success: true - on_failure: true diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index 191e60cda..d99106019 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -2,6 +2,7 @@ Stephen J Day Stephen Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland +Brian Bland Brian Bland Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern @@ -11,4 +12,7 @@ Jessie Frazelle Sharif Nassar Sharif Nassar Sven Dowideit Sven Dowideit Vincent Giersch Vincent Giersch -davidli davidli \ No newline at end of file +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index 4b97cd78d..9e80e062b 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -1,19 +1,28 @@ Aaron Lehmann +Aaron Schlesinger Aaron Vinson Adam Enger Adrian Mouat Ahmet Alp Balkan Alex Chan Alex Elman +Alexey Gladkov +allencloud amitshukla Amy Lindburg +Andrew Hsu Andrew Meredith +Andrew T Nguyen Andrey Kostov Andy Goldstein +Anis Elleuch Anton Tiurin Antonio Mercado +Antonio Murdaca +Arien Holthuizen Arnaud Porterie Arthur Baars +Asuka Suzuki Avi Miller Ayose Cazorla BadZen @@ -21,8 +30,11 @@ Ben Firshman bin liu Brian Bland burnettk +Carson A Chris Dillon +cyli Daisuke Fujita +Daniel Huhn Darren Shepherd Dave Trombley Dave Tucker @@ -33,56 +45,82 @@ davidli Dejan Golja Derek McGowan Diogo Mónica +DJ Enriquez Donald Huang Doug Davis +Eric Yang +Fabio Huser farmerworking +Felix Yan Florentin Raud Frederick F. 
Kautz IV +gabriell nascimento +Gleb Schukin harche Henri Gomez Hu Keping Hua Wang +HuKeping Ian Babrou +igayoso Jack Griffin Jason Freidman Jeff Nickoloff Jessie Frazelle +jhaohai Jianqing Wang +John Starks +Jon Johnson Jon Poler Jonathan Boulle Jordan Liggitt Josh Hawn Julien Fernandez +Ke Xu +Keerthan Mala Kelsey Hightower Kenneth Lim +Kenny Leung Li Yi +Liu Hua +liuchang0812 Louis Kottmann Luke Carpenter Mary Anthony Matt Bentley +Matt Duch Matt Moore Matt Robenolt Michael Prokop +Michal Minar Miquel Sabaté Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran +Nikita Tarasov Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques +Omer Cohen Patrick Devine +Phil Estes Philip Misiowiec Richard Scothern +Rodolfo Carvalho Rusty Conover +Sean Boran Sebastiaan van Stijn +Serge Dubrouski Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart +Stefan Majewsky +Stefan Weil Stephen J Day Sungho Moon Sven Dowideit @@ -93,12 +131,17 @@ Thomas Sjögren Tianon Gravi Tibor Vass Tonis Tiigi +Tony Holdstock-Brown +Trevor Pounds Troels Thomsen Vincent Batts Vincent Demeester Vincent Giersch W. Trevor King +weiyuan.yl xg.song xiekeyang Yann ROBERT yuzou +zhouhaibing089 +姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 000000000..d9577022b --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,119 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). 
+ +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. 
+ +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/tools/godep github.com/golang/lint/golint + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/Sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? 
github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry -version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md new file mode 100644 index 000000000..3445c090c --- /dev/null +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog + +## 2.5.0 (2016-06-14) + +### Storage +- Ensure uploads directory is cleaned after upload is commited +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +### API +- Support returning HTTP 429 (Too Many Requests) + +### Documentation +- Update auth 
documentation examples to show "expires in" as int + +### Docker Image +- Use Alpine Linux as base image + + diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md index 1a9ecb744..7cc7aedff 100644 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -76,7 +76,7 @@ Some simple rules to ensure quick merge: You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. Then you should submit your implementation, clearly linking to the issue (and possible proposal). @@ -90,7 +90,7 @@ It's mandatory to: Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. 
-Have a look at a great, succesful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443) +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) ## Coding Style diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index 1a5822229..fa9cd4627 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,19 +1,18 @@ -FROM golang:1.5.2 - -RUN apt-get update && \ - apt-get install -y librados-dev apache2-utils && \ - rm -rf /var/lib/apt/lists/* +FROM golang:1.6-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH -ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs +ENV DOCKER_BUILDTAGS include_oss include_gcs WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml + +RUN set -ex \ + && apk add --no-cache make git + RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] -CMD ["/etc/docker/registry/config.yml"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 4604a39a0..a0602d0b2 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -14,8 +14,8 @@ endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" .PHONY: clean all fmt vet lint build test binaries -.DEFAULT: default -all: AUTHORS clean fmt vet fmt lint build test binaries +.DEFAULT: all +all: fmt vet lint build test binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ @@ -24,51 +24,83 @@ AUTHORS: .mailmap .git/HEAD version/version.go: ./version/version.sh > $@ -${PREFIX}/bin/registry: version/version.go 
$(shell find . -type f -name '*.go') +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Package list +PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT := $(shell which golint || echo '') +GODEP := $(shell which godep || echo '') + +${PREFIX}/bin/registry: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry -${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/digest: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest -${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ -# Depends on binaries because vet will silently fail if it can't load compiled -# imports -vet: binaries +vet: @echo "+ $@" - @go vet ./... + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) fmt: @echo "+ $@" - @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ - echo "+ please format Go code with 'gofmt -s'" + @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ + (echo >&2 "+ please format Go code with 'gofmt -s'" && false) lint: @echo "+ $@" - @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" + $(if $(GOLINT), , \ + $(error Please install golint: `go get -u github.com/golang/lint/golint`)) + @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" build: @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./... 
+ @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) test: @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" ./... + @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) test-full: @echo "+ $@" - @go test ./... + @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template @echo "+ $@" clean: @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" + @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" + +dep-save: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) save $(PKGS) + +dep-restore: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) restore -v + +dep-validate: dep-restore + @echo "+ $@" + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @rm -Rf Godeps + @$(GODEP) save ./... + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) + @rm -Rf .vendor.bak diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index e8262133a..d35bcb682 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -17,7 +17,7 @@ This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. 
| -| **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | @@ -83,7 +83,7 @@ created. For more information see [docker/migrator] Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](docs/building.md). +the instructions for [building a development environment](docs/recipes/building.md). ## Support @@ -128,4 +128,4 @@ avenues are available for support: ## License -This project is distributed under [Apache License, Version 2.0](LICENSE.md). +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md index 9cdfa36c9..701127afe 100644 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -156,7 +156,7 @@ full and understand the problems behind deletes. While, at first glance, implementing deleting seems simple, there are a number mitigating factors that make many solutions not ideal or even pathological in the context of a registry. The following paragraph discuss the background and -approaches that could be applied to a arrive at a solution. 
+approaches that could be applied to arrive at a solution. The goal of deletes in any system is to remove unused or unneeded data. Only data requested for deletion should be removed and no other data. Removing diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 2087d0f9e..d12533011 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -9,6 +9,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" ) var ( @@ -40,6 +41,18 @@ func (err ErrBlobInvalidDigest) Error() string { err.Digest, err.Reason) } +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + // Descriptor describes targeted content. Used in conjunction with a blob // store, a descriptor can be used to fetch, store and target any kind of // blob. The struct also describes the wire protocol format. Fields should @@ -56,11 +69,23 @@ type Descriptor struct { // against against this digest. Digest digest.Digest `json:"digest,omitempty"` + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. } +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. 
Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + // BlobStatter makes blob descriptors available by digest. The service may // provide a descriptor of a different digest if the provided digest is not // canonical. @@ -75,6 +100,11 @@ type BlobDeleter interface { Delete(ctx context.Context, dgst digest.Digest) error } +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + // BlobDescriptorService manages metadata about a blob by digest. Most // implementations will not expose such an interface explicitly. Such mappings // should be maintained by interacting with the BlobIngester. Hence, this is @@ -97,6 +127,11 @@ type BlobDescriptorService interface { Clear(ctx context.Context, dgst digest.Digest) error } +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + // ReadSeekCloser is the primary reader type for blob data, combining // io.ReadSeeker with io.Closer. type ReadSeekCloser interface { @@ -142,20 +177,31 @@ type BlobIngester interface { // returned handle can be written to and later resumed using an opaque // identifier. With this approach, one can Close and Resume a BlobWriter // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context) (BlobWriter, error) + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) // Resume attempts to resume a write to a blob, identified by an id. 
Resume(ctx context.Context, id string) (BlobWriter, error) } +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be // recovered with the id. type BlobWriter interface { - io.WriteSeeker + io.WriteCloser io.ReaderFrom - io.Closer + + // Size returns the number of bytes written to this blob. + Size() int64 // ID returns the identifier for this writer. The ID can be used with the // Blob service to later resume the write. @@ -180,9 +226,6 @@ type BlobWriter interface { // result in a no-op. This allows use of Cancel in a defer statement, // increasing the assurance that it is correctly called. Cancel(ctx context.Context) error - - // Get a reader to the blob being written by this BlobWriter - Reader() (io.ReadCloser, error) } // BlobService combines the operations to access, read and write blobs. 
This diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index f5dc4d7a2..3d1ffd2f0 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -3,15 +3,12 @@ machine: pre: # Install gvm - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install ceph to test rados driver & create pool - - sudo -i ~/distribution/contrib/ceph/ci-setup.sh - - ceph osd pool create docker-distribution 1 # Install codecov for coverage - pip install --user codecov post: # go - - gvm install go1.5 --prefer-binary --name=stable + - gvm install go1.6 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations @@ -19,11 +16,9 @@ machine: BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME # Trick circle brainflat "no absolute path" behavior BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" + DOCKER_BUILDTAGS: "include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" - # Ceph config - RADOS_POOL: "docker-distribution" hosts: # Not used yet @@ -48,52 +43,45 @@ dependencies: gvm use stable && go get github.com/axw/gocov/gocov github.com/golang/lint/golint - # Disabling goveralls for now - # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint - test: pre: # Output the go versions we are going to test # - gvm use old && go version - gvm use stable && go version + # Ensure validation of dependencies + - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + pwd: $BASE_STABLE + # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). 
- - gvm use stable && godep go install ./...: + - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): pwd: $BASE_STABLE # FMT - - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + - gvm use stable && make fmt: pwd: $BASE_STABLE # VET - - gvm use stable && go vet ./...: + - gvm use stable && make vet: pwd: $BASE_STABLE # LINT - - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + - gvm use stable && make lint: pwd: $BASE_STABLE override: - # Test stable, and report - # Preset the goverall report file - # - echo "$CIRCLE_PAIN" > ~/goverage.report - - - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out: - pwd: $BASE_STABLE - - - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out -covermode=count {}: + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': timeout: 600 pwd: $BASE_STABLE post: - # Aggregate and report to coveralls + # Report to codecov - bash <(curl -s https://codecov.io/bash): pwd: $BASE_STABLE ## Notes - # Disabled coveralls reporting: build breaking sending coverage data to coveralls # Disabled the -race detector due to massive memory usage. # Do we want these as well? 
# - go get code.google.com/p/go.tools/cmd/goimports diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go index 6fe1f817d..3b4ab8882 100644 --- a/vendor/github.com/docker/distribution/context/doc.go +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -1,6 +1,6 @@ // Package context provides several utilities for working with // golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevent request information but this package is not limited to +// logging relevant request information but this package is not limited to // that purpose. // // The easiest way to get started is to get the background context: diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go index af4f1351e..721964a84 100644 --- a/vendor/github.com/docker/distribution/context/trace.go +++ b/vendor/github.com/docker/distribution/context/trace.go @@ -10,7 +10,7 @@ import ( // WithTrace allocates a traced timing span in a new context. This allows a // caller to track the time between calling WithTrace and the returned done // function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapased time and a +// "trace.duration" field, corresponding to the elapsed time and a // "trace.func" field, corresponding to the function that called WithTrace. // // The logging keys "trace.id" and "trace.parent.id" are provided to implement diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go index 299edc004..cb9ef52e3 100644 --- a/vendor/github.com/docker/distribution/context/util.go +++ b/vendor/github.com/docker/distribution/context/util.go @@ -8,25 +8,17 @@ import ( // since that time. If the key is not found, the value returned will be zero. 
// This is helpful when inferring metrics related to context execution times. func Since(ctx Context, key interface{}) time.Duration { - startedAtI := ctx.Value(key) - if startedAtI != nil { - if startedAt, ok := startedAtI.(time.Time); ok { - return time.Since(startedAt) - } + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) } - return 0 } // GetStringValue returns a string value from the context. The empty string // will be returned if not found. func GetStringValue(ctx Context, key interface{}) (value string) { - stringi := ctx.Value(key) - if stringi != nil { - if valuev, ok := stringi.(string); ok { - value = valuev - } + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev } - return value } diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh new file mode 100755 index 000000000..25d419ae8 --- /dev/null +++ b/vendor/github.com/docker/distribution/coverpkg.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Given a subpackage and the containing package, figures out which packages +# need to be passed to `go test -coverpkg`: this includes all of the +# subpackage's dependencies within the containing package, as well as the +# subpackage itself. +DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" +echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go index f3e12bca4..31d821bba 100644 --- a/vendor/github.com/docker/distribution/digest/digest.go +++ b/vendor/github.com/docker/distribution/digest/digest.go @@ -28,7 +28,15 @@ type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. 
func NewDigest(alg Algorithm, h hash.Hash) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) } // NewDigestFromHex returns a Digest from alg and a the hex encoded digest. @@ -69,18 +77,7 @@ func FromReader(rd io.Reader) (Digest, error) { // FromBytes digests the input and returns a Digest. func FromBytes(p []byte) Digest { - digester := Canonical.New() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() + return Canonical.FromBytes(p) } // Validate checks that the contents of d is a valid digest, returning an diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go index cbb2e3683..f3105a45b 100644 --- a/vendor/github.com/docker/distribution/digest/digester.go +++ b/vendor/github.com/docker/distribution/digest/digester.go @@ -2,6 +2,7 @@ package digest import ( "crypto" + "fmt" "hash" "io" ) @@ -84,11 +85,18 @@ func (a Algorithm) New() Digester { } } -// Hash returns a new hash as used by the algorithm. If not available, nil is -// returned. Make sure to check Available before calling. +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. 
Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { - return nil + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) } return algorithms[a].New() @@ -105,6 +113,22 @@ func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { return digester.Digest(), nil } +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + // TODO(stevvooe): Allow resolution of verifiers using the digest type and // this registration system. diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go index 3fac41b40..4b9313c1a 100644 --- a/vendor/github.com/docker/distribution/digest/set.go +++ b/vendor/github.com/docker/distribution/digest/set.go @@ -22,7 +22,7 @@ var ( // may be easily referenced by easily referenced by a string // representation of the digest as well as short representation. // The uniqueness of the short representation is based on other -// digests in the set. 
If digests are ommited from this set, +// digests in the set. If digests are omitted from this set, // collisions in a larger set may not be detected, therefore it // is important to always do short representation lookups on // the complete set of digests. To mitigate collisions, an diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go index 77bd096ec..c20f28113 100644 --- a/vendor/github.com/docker/distribution/errors.go +++ b/vendor/github.com/docker/distribution/errors.go @@ -8,6 +8,10 @@ import ( "github.com/docker/distribution/digest" ) +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + // ErrManifestNotModified is returned when a conditional manifest GetByTag // returns nil due to the client indicating it has the latest version var ErrManifestNotModified = errors.New("manifest not modified") diff --git a/vendor/github.com/docker/distribution/health/doc.go b/vendor/github.com/docker/distribution/health/doc.go index 194b8a566..8c106b42b 100644 --- a/vendor/github.com/docker/distribution/health/doc.go +++ b/vendor/github.com/docker/distribution/health/doc.go @@ -2,7 +2,7 @@ // The health package works expvar style. By importing the package the debug // server is getting a "/debug/health" endpoint that returns the current // status of the application. -// If there are no errors, "/debug/health" will return a HTTP 200 status, +// If there are no errors, "/debug/health" will return an HTTP 200 status, // together with an empty JSON reply "{}". If there are any checks // with errors, the JSON reply will include all the failed checks, and the // response will be have an HTTP 503 status. 
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 7cb91ab82..2ac7c8f21 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -2,6 +2,7 @@ package distribution import ( "fmt" + "mime" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" @@ -25,7 +26,7 @@ type Manifest interface { // specific data is passed into the function which creates the builder. type ManifestBuilder interface { // Build creates the manifest from his builder. - Build() (Manifest, error) + Build(ctx context.Context) (Manifest, error) // References returns a list of objects which have been added to this // builder. The dependencies are returned in the order they were added, @@ -52,12 +53,12 @@ type ManifestService interface { // Delete removes the manifest specified by the given digest. Deleting // a manifest that doesn't exist will return ErrManifestNotFound Delete(ctx context.Context, dgst digest.Digest) error +} - // Enumerate fills 'manifests' with the manifests in this service up - // to the size of 'manifests' and returns 'n' for the number of entries - // which were filled. 'last' contains an offset in the manifest set - // and can be used to resume iteration. - //Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error) +// ManifestEnumerator enables iterating over manifests +type ManifestEnumerator interface { + // Enumerate calls ingester for each manifest. + Enumerate(ctx context.Context, ingester func(digest.Digest) error) error } // Describable is an interface for descriptors @@ -68,7 +69,9 @@ type Describable interface { // ManifestMediaTypes returns the supported media types for manifests. 
func ManifestMediaTypes() (mediaTypes []string) { for t := range mappings { - mediaTypes = append(mediaTypes, t) + if t != "" { + mediaTypes = append(mediaTypes, t) + } } return } @@ -78,12 +81,26 @@ type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) var mappings = make(map[string]UnmarshalFunc, 0) -// UnmarshalManifest looks up manifest unmarshall functions based on +// UnmarshalManifest looks up manifest unmarshal functions based on // MediaType -func UnmarshalManifest(mediatype string, p []byte) (Manifest, Descriptor, error) { +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediatype string + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + unmarshalFunc, ok := mappings[mediatype] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype) + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } } return unmarshalFunc(p) diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index c3e77c6e8..bb09fa25d 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -3,10 +3,10 @@ // // Grammar // -// reference := repository [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [hostname '/'] component ['/' component]* // hostname := hostcomponent ['.' 
hostcomponent]* [':' port-number] -// hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ @@ -46,8 +46,7 @@ var ( // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") - // ErrNameTooLong is returned when a repository name is longer than - // RepositoryNameTotalLengthMax + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) ) diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index a4ffe5b64..9a7d366bc 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -22,7 +22,7 @@ var ( // hostnameComponentRegexp restricts the registry hostname component of a // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is @@ -49,7 +49,7 @@ var ( // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the hostname and name part omitting - // the seperating forward slash from either. + // the separating forward slash from either. 
NameRegexp = expression( optional(hostnameRegexp, literal(`/`)), nameComponentRegexp, diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index ce5d77792..1ede31ebb 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -2,6 +2,7 @@ package distribution import ( "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" ) // Scope defines the set of items that match a namespace. @@ -32,13 +33,24 @@ type Namespace interface { // Repository should return a reference to the named repository. The // registry may or may not have the repository but should always return a // reference. - Repository(ctx context.Context, name string) (Repository, error) + Repository(ctx context.Context, name reference.Named) (Repository, error) // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries // which were filled. 'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. 
Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error } // ManifestServiceOption is a function argument for Manifest Service methods @@ -46,10 +58,24 @@ type ManifestServiceOption interface { Apply(ManifestService) error } +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} + // Repository is a named collection of manifests and layers. type Repository interface { - // Name returns the name of the repository. - Name() string + // Named returns the name of the repository. + Named() reference.Named // Manifests returns a reference to this repository's manifest service. // with the supplied options applied. diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go index fdaddbcf8..6d9bb4b62 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -25,7 +25,8 @@ func (ec ErrorCode) ErrorCode() ErrorCode { // Error returns the ID/Value func (ec ErrorCode) Error() string { - return ec.Descriptor().Value + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) } // Descriptor returns the descriptor for the error code. 
@@ -68,6 +69,15 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { return nil } +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { @@ -104,9 +114,7 @@ func (e Error) ErrorCode() ErrorCode { // Error returns a human readable representation of the error. func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) } // WithDetail will return a new Error, based on the current one, but with diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go index 01c34384b..71cf6f7af 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -63,6 +63,16 @@ var ( Description: "Returned when a service is not available", HTTPStatusCode: http.StatusServiceUnavailable, }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) ) var nextCode = 1000 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go index 52c725dc2..fc42c1c41 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -271,7 +271,7 @@ type MethodDescriptor struct { // RequestDescriptor per API use case. type RequestDescriptor struct { // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particalar request. + // to provide quick context for the particular request. Name string // Description should cover the requests purpose, covering any details for @@ -303,14 +303,14 @@ type RequestDescriptor struct { // ResponseDescriptor describes the components of an API response. type ResponseDescriptor struct { // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particalar response. + // to provide quick context for the particular response. Name string // Description should provide a brief overview of the role of the // response. Description string - // StatusCode specifies the status recieved by this particular response. + // StatusCode specifies the status received by this particular response. StatusCode int // Headers covers any headers that may be returned from the response. 
@@ -514,7 +514,7 @@ var routeDescriptors = []RouteDescriptor{ digestHeader, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, }, @@ -553,7 +553,7 @@ var routeDescriptors = []RouteDescriptor{ referenceParameterDescriptor, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, Successes: []ResponseDescriptor{ @@ -1041,6 +1041,70 @@ var routeDescriptors = []RouteDescriptor{ deniedResponseDescriptor, }, }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + 
unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, }, }, }, @@ -1433,8 +1497,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "Retrieve a sorted, json list of repositories available in the registry.", Requests: []RequestDescriptor{ { - Name: "Catalog Fetch Complete", - Description: "Request an unabridged list of repositories available.", + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", Successes: []ResponseDescriptor{ { Description: "Returns the unabridged list of repositories as a json response.", diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go index ece52a2cd..97d6923aa 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go @@ -84,7 +84,7 @@ var ( }) // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verfication. + // signature verification. ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 429743940..a959aaa89 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -5,7 +5,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/gorilla/mux" ) @@ -17,33 +17,35 @@ import ( // under "/foo/v2/...". Most application will only provide a schema, host and // port, such as "https://localhost:5000/". 
type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router + root *url.URL // url root (ie http://localhost/) + router *mux.Router + relative bool } // NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL) *URLBuilder { +func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { return &URLBuilder{ - root: root, - router: Router(), + root: root, + router: Router(), + relative: relative, } } // NewURLBuilderFromString workes identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. -func NewURLBuilderFromString(root string) (*URLBuilder, error) { +func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } - return NewURLBuilder(u), nil + return NewURLBuilder(u, relative), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. -func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { +func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { var scheme string forwardedProto := r.Header.Get("X-Forwarded-Proto") @@ -85,7 +87,7 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { u.Path = requestPath[0 : index+1] } - return NewURLBuilder(u) + return NewURLBuilder(u, relative) } // BuildBaseURL constructs a base url for the API, typically just "/v2/". @@ -113,10 +115,10 @@ func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { } // BuildTagsURL constructs a url to list the tags in the named repository. 
-func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { +func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { route := ub.cloneRoute(RouteNameTags) - tagsURL, err := route.URL("name", name) + tagsURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -126,10 +128,18 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { // BuildManifestURL constructs a url for the manifest identified by name and // reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { +func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { route := ub.cloneRoute(RouteNameManifest) - manifestURL, err := route.URL("name", name, "reference", reference) + tagOrDigest := "" + switch v := ref.(type) { + case reference.Tagged: + tagOrDigest = v.Tag() + case reference.Digested: + tagOrDigest = v.Digest().String() + } + + manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) if err != nil { return "", err } @@ -138,10 +148,10 @@ func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { } // BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { +func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { route := ub.cloneRoute(RouteNameBlob) - layerURL, err := route.URL("name", name, "digest", dgst.String()) + layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) if err != nil { return "", err } @@ -151,10 +161,10 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. 
-func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) - uploadURL, err := route.URL("name", name) + uploadURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -166,10 +176,10 @@ func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (str // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) - uploadURL, err := route.URL("name", name, "uuid", uuid) + uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) if err != nil { return "", err } @@ -186,12 +196,13 @@ func (ub *URLBuilder) cloneRoute(name string) clonedRoute { *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root - return clonedRoute{Route: route, root: root} + return clonedRoute{Route: route, root: root, relative: ub.relative} } type clonedRoute struct { *mux.Route - root *url.URL + root *url.URL + relative bool } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { @@ -200,11 +211,17 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { return nil, err } + if cr.relative { + return routeURL, nil + } + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { routeURL.Path = routeURL.Path[1:] } - return cr.root.ResolveReference(routeURL), nil + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil } // appendValuesURL appends the parameters to the url. 
diff --git a/vendor/github.com/docker/distribution/registry/auth/auth.go b/vendor/github.com/docker/distribution/registry/auth/auth.go index b3bb580d2..0cb37235b 100644 --- a/vendor/github.com/docker/distribution/registry/auth/auth.go +++ b/vendor/github.com/docker/distribution/registry/auth/auth.go @@ -33,12 +33,31 @@ package auth import ( + "errors" "fmt" "net/http" "github.com/docker/distribution/context" ) +const ( + // UserKey is used to get the user object from + // a user context + UserKey = "auth.user" + + // UserNameKey is used to get the user name from + // a user context + UserNameKey = "auth.user.name" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") + + // ErrAuthenticationFailure returned when authentication fails. + ErrAuthenticationFailure = errors.New("authentication failure") +) + // UserInfo carries information about // an autenticated/authorized client. type UserInfo struct { @@ -87,6 +106,11 @@ type AccessController interface { Authorized(ctx context.Context, access ...Access) (context.Context, error) } +// CredentialAuthenticator is an object which is able to authenticate credentials +type CredentialAuthenticator interface { + AuthenticateUser(username, password string) error +} + // WithUser returns a context with the authorized user info. 
func WithUser(ctx context.Context, user UserInfo) context.Context { return userInfoContext{ @@ -102,9 +126,9 @@ type userInfoContext struct { func (uic userInfoContext) Value(key interface{}) interface{} { switch key { - case "auth.user": + case UserKey: return uic.user - case "auth.user.name": + case UserNameKey: return uic.user.Name } diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go index 82d3556dc..4f71dc274 100644 --- a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -6,7 +6,6 @@ package htpasswd import ( - "errors" "fmt" "net/http" "os" @@ -15,14 +14,6 @@ import ( "github.com/docker/distribution/registry/auth" ) -var ( - // ErrInvalidCredential is returned when the auth token does not authenticate correctly. - ErrInvalidCredential = errors.New("invalid authorization credential") - - // ErrAuthenticationFailure returned when authentication failure to be presented to agent. 
- ErrAuthenticationFailure = errors.New("authentication failured") -) - type accessController struct { realm string htpasswd *htpasswd @@ -65,21 +56,25 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut if !ok { return nil, &challenge{ realm: ac.realm, - err: ErrInvalidCredential, + err: auth.ErrInvalidCredential, } } - if err := ac.htpasswd.authenticateUser(username, password); err != nil { + if err := ac.AuthenticateUser(username, password); err != nil { context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) return nil, &challenge{ realm: ac.realm, - err: ErrAuthenticationFailure, + err: auth.ErrAuthenticationFailure, } } return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil } +func (ac *accessController) AuthenticateUser(username, password string) error { + return ac.htpasswd.authenticateUser(username, password) +} + // challenge implements the auth.Challenge interface. type challenge struct { realm string diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go index 494ad0a76..b10b256c7 100644 --- a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go @@ -6,6 +6,8 @@ import ( "io" "strings" + "github.com/docker/distribution/registry/auth" + "golang.org/x/crypto/bcrypt" ) @@ -33,12 +35,12 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err // timing attack paranoia bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) - return ErrAuthenticationFailure + return auth.ErrAuthenticationFailure } err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) if err != nil { - return ErrAuthenticationFailure + return auth.ErrAuthenticationFailure } return nil @@ -46,7 +48,7 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) 
err // parseHTPasswd parses the contents of htpasswd. This will read all the // entries in the file, whether or not they are needed. An error is returned -// if an syntax errors are encountered or if the reader fails. +// if a syntax errors are encountered or if the reader fails. func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { entries := map[string][]byte{} scanner := bufio.NewScanner(rd) diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token.go b/vendor/github.com/docker/distribution/registry/auth/token/token.go index 166816eea..2598f362a 100644 --- a/vendor/github.com/docker/distribution/registry/auth/token/token.go +++ b/vendor/github.com/docker/distribution/registry/auth/token/token.go @@ -52,11 +52,11 @@ type ClaimSet struct { // Header describes the header section of a JSON Web Token. type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK json.RawMessage `json:"jwk,omitempty"` + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK *json.RawMessage `json:"jwk,omitempty"` } // Token describes a JSON Web Token. @@ -193,7 +193,7 @@ func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust. 
switch { case len(x5c) > 0: signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case len(rawJWK) > 0: + case rawJWK != nil: signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) case len(keyID) > 0: signingKey = verifyOpts.TrustedKeys[keyID] @@ -266,8 +266,8 @@ func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtru return } -func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) +func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) if err != nil { return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go index a6ad45d85..c8cd83bb9 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/authchallenge.go @@ -25,7 +25,7 @@ type Challenge struct { type ChallengeManager interface { // GetChallenges returns the challenges for the given // endpoint URL. - GetChallenges(endpoint string) ([]Challenge, error) + GetChallenges(endpoint url.URL) ([]Challenge, error) // AddResponse adds the response to the challenge // manager. 
The challenges will be parsed out of @@ -48,8 +48,10 @@ func NewSimpleChallengeManager() ChallengeManager { type simpleChallengeManager map[string][]Challenge -func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { - challenges := m[endpoint] +func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + endpoint.Host = strings.ToLower(endpoint.Host) + + challenges := m[endpoint.String()] return challenges, nil } @@ -60,11 +62,10 @@ func (m simpleChallengeManager) AddResponse(resp *http.Response) error { } urlCopy := url.URL{ Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, + Host: strings.ToLower(resp.Request.URL.Host), Scheme: resp.Request.URL.Scheme, } m[urlCopy.String()] = challenges - return nil } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index 6c92fc343..f3497b17a 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -15,6 +15,18 @@ import ( "github.com/docker/distribution/registry/client/transport" ) +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +const defaultClientID = "registry-client" + // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a single scheme. 
type AuthenticationHandler interface { @@ -32,6 +44,14 @@ type AuthenticationHandler interface { type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) } // NewAuthorizer creates an authorizer which can handle multiple authentication @@ -63,9 +83,7 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { Path: req.URL.Path[:v2Root+4], } - pingEndpoint := ping.String() - - challenges, err := ea.challenges.GetChallenges(pingEndpoint) + challenges, err := ea.challenges.GetChallenges(ping) if err != nil { return err } @@ -101,25 +119,47 @@ type clock interface { type tokenHandler struct { header http.Header creds CredentialStore - scope tokenScope transport http.RoundTripper clock clock + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + tokenLock sync.Mutex tokenCache string tokenExpiration time.Time } -// tokenScope represents the scope at which a token will be requested. -// This represents a specific action on a registry resource. -type tokenScope struct { - Resource string - Scope string - Actions []string +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. 
+type RepositoryScope struct { + Repository string + Actions []string } -func (ts tokenScope) String() string { - return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope } // An implementation of clock for providing real time data. @@ -131,21 +171,33 @@ func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - return newTokenHandler(transport, creds, realClock{}, scope, actions...) + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) } -// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. -func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { - return &tokenHandler{ - transport: transport, - creds: creds, - clock: c, - scope: tokenScope{ - Resource: "repository", - Scope: scope, - Actions: actions, - }, +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. 
+func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, } + + return handler } func (th *tokenHandler) client() *http.Client { @@ -160,71 +212,164 @@ func (th *tokenHandler) Scheme() string { } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if err := th.refreshToken(params); err != nil { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, + }.String()) + } + + token, err := th.getToken(params, additionalScopes...) + if err != nil { return err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return nil } -func (th *tokenHandler) refreshToken(params map[string]string) error { +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { th.tokenLock.Lock() defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + scopes = append(scopes, scope) + addedScopes = true + } + now := th.clock.Now() - if now.After(th.tokenExpiration) { - tr, err := th.fetchToken(params) + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) if err != nil { - return err + return "", err } - th.tokenCache = tr.Token - th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) + + // do not update cache for added scope tokens 
+ if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil } - return nil + return th.tokenCache, nil } -type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` } -func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { - //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) - realm, ok := params["realm"] - if !ok { - return nil, errors.New("no realm specified for token auth challenge") - } +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) - // TODO(dmcgowan): Handle empty scheme + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } - realmURL, err := url.Parse(realm) + resp, err := th.client().PostForm(realm.String(), form) if err != nil { - return nil, fmt.Errorf("invalid token auth challenge realm: %s", 
err) + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } - req, err := http.NewRequest("GET", realmURL.String(), nil) + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) if err != nil { - return nil, err + return "", time.Time{}, err } reqParams := req.URL.Query() - service := params["service"] - scope := th.scope.String() if service != "" { reqParams.Add("service", service) } - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) + for _, scope := range scopes { + reqParams.Add("scope", scope) + } + + if th.offlineAccess { + reqParams.Add("offline_token", "true") + clientID := th.clientID + if clientID == "" { + clientID = defaultClientID + } + reqParams.Add("client_id", 
clientID) } if th.creds != nil { - username, password := th.creds.Basic(realmURL) + username, password := th.creds.Basic(realm) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) @@ -235,19 +380,24 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon resp, err := th.client().Do(req) if err != nil { - return nil, err + return "", time.Time{}, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { - return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err } decoder := json.NewDecoder(resp.Body) - tr := new(tokenResponse) - if err = decoder.Decode(tr); err != nil { - return nil, fmt.Errorf("unable to decode token response: %s", err) + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } // `access_token` is equivalent to `token` and if both are specified @@ -258,21 +408,48 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon } if tr.Token == "" { - return nil, errors.New("authorization server did not include a token in the response") + return "", time.Time{}, ErrNoToken } if tr.ExpiresIn < minimumTokenLifetimeSeconds { - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now() + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + if refreshToken != "" || th.forceOAuth { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) } - return tr, nil + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) } type basicHandler struct { @@ -299,5 +476,5 @@ func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]st return nil } } - return errors.New("no basic auth credentials") + return ErrNoBasicAuthCredentials } diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go index c7eee4e8c..e3ffcb00f 100644 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "time" "github.com/docker/distribution" @@ -33,7 +32,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { @@ -104,21 +103,8 @@ func (hbu 
*httpBlobUpload) Write(p []byte) (n int, err error) { } -func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { - newOffset := hbu.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset += int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - hbu.offset = newOffset - - return hbu.offset, nil +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset } func (hbu *httpBlobUpload) ID() string { diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go index 7305c021c..f73e3c230 100644 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -2,6 +2,7 @@ package client import ( "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -10,6 +11,10 @@ import ( "github.com/docker/distribution/registry/api/errcode" ) +// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty +// errcode.Errors slice. +var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. type UnexpectedHTTPStatusError struct { @@ -17,46 +22,80 @@ type UnexpectedHTTPStatusError struct { } func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) } // UnexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. 
type UnexpectedHTTPResponseError struct { - ParseErr error - Response []byte + ParseErr error + StatusCode int + Response []byte } func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) } -func parseHTTPErrorResponse(r io.Reader) error { +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. + var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + switch statusCode { + case http.StatusUnauthorized: + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + case http.StatusTooManyRequests: + return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) + default: + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + } + if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ - ParseErr: err, - Response: body, + ParseErr: err, + StatusCode: statusCode, + Response: body, } } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, + } + } + return errors } -func handleErrorResponse(resp *http.Response) error { +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. 
+func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.Body) + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.Body) + return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index 9d489dd58..323ab5086 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -27,16 +27,50 @@ type Registry interface { Repositories(ctx context.Context, repos []string, last string) (n int, err error) } +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. 
+ hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, + Transport: transport, + Timeout: 1 * time.Minute, + CheckRedirect: checkHTTPRedirect, } return ®istry{ @@ -91,25 +125,22 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri returnErr = io.EOF } } else { - return 0, handleErrorResponse(resp) + return 0, HandleErrorResponse(resp) } return numFilled, returnErr } // NewRepository creates a new Repository for the given repository name and base URL. 
-func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if _, err := reference.ParseNamed(name); err != nil { - return nil, err - } - - ub, err := v2.NewURLBuilderFromString(baseURL) +func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } client := &http.Client{ - Transport: transport, + Transport: transport, + CheckRedirect: checkHTTPRedirect, // TODO(dmcgowan): create cookie jar } @@ -125,21 +156,21 @@ type repository struct { client *http.Client ub *v2.URLBuilder context context.Context - name string + name reference.Named } -func (r *repository) Name() string { +func (r *repository) Named() reference.Named { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, } return &blobs{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), @@ -149,7 +180,7 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, etags: make(map[string]string), @@ -161,7 +192,7 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService { client: r.client, ub: r.ub, context: r.context, - name: r.Name(), + name: r.Named(), } } @@ -170,7 +201,7 @@ type tags struct { client *http.Client ub *v2.URLBuilder context context.Context - name string + name reference.Named } // All returns all tags @@ -203,7 +234,7 @@ func 
(t *tags) All(ctx context.Context) ([]string, error) { tags = tagsResponse.Tags return tags, nil } - return tags, handleErrorResponse(resp) + return tags, HandleErrorResponse(resp) } func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { @@ -253,30 +284,53 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e // to construct a descriptor for the tag. If the registry doesn't support HEADing // a manifest, fallback to GET. func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - u, err := t.ub.BuildManifestURL(t.name, tag) + ref, err := reference.WithTag(t.name, tag) if err != nil { return distribution.Descriptor{}, err } - var attempts int - resp, err := t.client.Head(u) + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + req, err := http.NewRequest("HEAD", u, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + var attempts int + resp, err := t.client.Do(req) check: if err != nil { return distribution.Descriptor{}, err } + defer resp.Body.Close() switch { case resp.StatusCode >= 200 && resp.StatusCode < 400: return descriptorFromResponse(resp) case resp.StatusCode == http.StatusMethodNotAllowed: - resp, err = t.client.Get(u) + req, err = http.NewRequest("GET", u, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + resp, err = t.client.Do(req) attempts++ if attempts > 1 { return distribution.Descriptor{}, err } goto check default: - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } } @@ -293,14 +347,18 @@ func (t *tags) Untag(ctx context.Context, tag string) error { } type manifests struct { - name string + name reference.Named ub 
*v2.URLBuilder client *http.Client etags map[string]string } func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return false, err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return false, err } @@ -315,7 +373,7 @@ func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, erro } else if resp.StatusCode == http.StatusNotFound { return false, nil } - return false, handleErrorResponse(resp) + return false, HandleErrorResponse(resp) } // AddEtagToTag allows a client to supply an eTag to Get which will be @@ -336,12 +394,37 @@ func (o etagOption) Apply(ms distribution.ManifestService) error { return fmt.Errorf("etag options is a client-only option") } +// ReturnContentDigest allows a client to set a the content digest on +// a successful request from the 'Docker-Content-Digest' header. This +// returned digest is represents the digest which the registry uses +// to refer to the content and can be used to delete the content. 
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + ) - var tag string for _, option := range options { - if opt, ok := option.(withTagOption); ok { - tag = opt.tag + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + } else if opt, ok := option.(contentDigestOption); ok { + contentDgst = opt.digest } else { err := option.Apply(ms) if err != nil { @@ -350,14 +433,15 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } } - var ref string - if tag != "" { - ref = tag - } else { - ref = dgst.String() + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } } - u, err := ms.ub.BuildManifestURL(ms.name, ref) + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err } @@ -371,8 +455,8 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis req.Header.Add("Accept", t) } - if _, ok := ms.etags[ref]; ok { - req.Header.Set("If-None-Match", ms.etags[ref]) + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) } resp, err := ms.client.Do(req) @@ -383,6 +467,12 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { + if contentDgst != 
nil { + dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } mt := resp.Header.Get("Content-Type") body, err := ioutil.ReadAll(resp.Body) @@ -395,32 +485,23 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } return m, nil } - return nil, handleErrorResponse(resp) -} - -// WithTag allows a tag to be passed into Put which enables the client -// to build a correct URL. -func WithTag(tag string) distribution.ManifestServiceOption { - return withTagOption{tag} -} - -type withTagOption struct{ tag string } - -func (o withTagOption) Apply(m distribution.ManifestService) error { - if _, ok := m.(*manifests); ok { - return nil - } - return fmt.Errorf("withTagOption is a client-only option") + return nil, HandleErrorResponse(resp) } // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. This state is written and read under a lock. +// tag name in order to build the correct upload URL. func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - var tag string + ref := ms.name + var tagged bool for _, option := range options { - if opt, ok := option.(withTagOption); ok { - tag = opt.tag + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true } else { err := option.Apply(ms) if err != nil { @@ -428,13 +509,24 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . 
} } } - - manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag) + mediaType, p, err := m.Payload() if err != nil { return "", err } - mediaType, p, err := m.Payload() + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { return "", err } @@ -462,11 +554,15 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . return dgst, nil } - return "", handleErrorResponse(resp) + return "", HandleErrorResponse(resp) } func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return err } @@ -484,7 +580,7 @@ func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } // todo(richardscothern): Restore interface and implementation with merge of #1050 @@ -493,7 +589,7 @@ func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { }*/ type blobs struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client @@ -531,7 +627,11 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { } func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return nil, err } @@ -541,7 +641,7 @@ func (bs *blobs) Open(ctx context.Context, dgst 
digest.Digest) (distribution.Rea if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) }), nil } @@ -572,8 +672,57 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } -func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { - u, err := bs.ub.BuildBlobUploadURL(bs.name) +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
+ if err != nil { + return nil, err + } resp, err := bs.client.Post(u, "", nil) if err != nil { @@ -581,7 +730,14 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) @@ -596,8 +752,9 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { startedAt: time.Now(), location: location, }, nil + default: + return nil, HandleErrorResponse(resp) } - return nil, handleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -609,13 +766,17 @@ func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { } type blobStatter struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - u, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) if err != nil { return distribution.Descriptor{}, err } @@ -645,7 +806,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -663,7 +824,11 
@@ func buildCatalogValues(maxEntries int, last string) url.Values { } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return err } @@ -682,7 +847,7 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go index b27b6c237..e1b17a03a 100644 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -1,12 +1,22 @@ package transport import ( - "bufio" "errors" "fmt" "io" "net/http" "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. + ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") ) // ReadSeekCloser combines io.ReadSeeker with io.Closer. @@ -40,8 +50,6 @@ type httpReadSeeker struct { // rc is the remote read closer. rc io.ReadCloser - // brd is a buffer for internal buffered io. - brd *bufio.Reader // readerOffset tracks the offset as of the last read. readerOffset int64 // seekOffset allows Seek to override the offset. 
Seek changes @@ -58,7 +66,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { return 0, hrs.err } - // If we seeked to a different position, we need to reset the + // If we sought to a different position, we need to reset the // connection. This logic is here instead of Seek so that if // a seek is undone before the next read, the connection doesn't // need to be closed and reopened. A common example of this is @@ -79,11 +87,6 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { hrs.seekOffset += int64(n) hrs.readerOffset += int64(n) - // Simulate io.EOF error if we reach filesize. - if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { - err = io.EOF - } - return n, err } @@ -92,8 +95,18 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { return 0, hrs.err } + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request. 
+ hrs.readerOffset = offset + } + _, err := hrs.reader() if err != nil { + hrs.readerOffset = lastReaderOffset return 0, err } @@ -101,14 +114,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { case os.SEEK_CUR: - newOffset += int64(offset) + newOffset += offset case os.SEEK_END: if hrs.size < 0 { return 0, errors.New("content length not known") } - newOffset = hrs.size + int64(offset) + newOffset = hrs.size + offset case os.SEEK_SET: - newOffset = int64(offset) + newOffset = offset } if newOffset < 0 { @@ -131,7 +144,6 @@ func (hrs *httpReadSeeker) Close() error { } hrs.rc = nil - hrs.brd = nil hrs.err = errors.New("httpLayer: closed") @@ -154,7 +166,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.rc != nil { - return hrs.brd, nil + return hrs.rc, nil } req, err := http.NewRequest("GET", hrs.url, nil) @@ -163,10 +175,8 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.readerOffset > 0 { - // TODO(stevvooe): Get this working correctly. - // If we are at different offset, issue a range request from there. 
- req.Header.Add("Range", "1-") + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) // TODO: get context in here // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } @@ -179,12 +189,55 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // Normally would use client.SuccessStatus, but that would be a cyclic // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - hrs.rc = resp.Body - if resp.StatusCode == http.StatusOK { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { hrs.size = resp.ContentLength } else { hrs.size = -1 
} + hrs.rc = resp.Body } else { defer resp.Body.Close() if hrs.errorHandler != nil { @@ -193,11 +246,5 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } - if hrs.brd == nil { - hrs.brd = bufio.NewReader(hrs.rc) - } else { - hrs.brd.Reset(hrs.rc) - } - - return hrs.brd, nil + return hrs.rc, nil }